2024-12-05 12:02:03,222 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-05 12:02:03,251 main DEBUG Took 0.022866 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-05 12:02:03,252 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-05 12:02:03,253 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-05 12:02:03,256 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-05 12:02:03,258 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,271 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-05 12:02:03,296 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,298 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,300 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,302 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,303 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,304 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,305 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,306 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,309 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,310 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,311 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,312 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,312 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,313 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,314 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,317 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,319 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-05 12:02:03,321 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,322 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-05 12:02:03,325 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-05 12:02:03,327 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-05 12:02:03,329 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-05 12:02:03,331 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-05 12:02:03,332 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-05 12:02:03,333 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-05 12:02:03,344 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-05 12:02:03,348 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-05 12:02:03,353 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-05 12:02:03,354 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-05 12:02:03,354 main DEBUG createAppenders(={Console})
2024-12-05 12:02:03,355 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized
2024-12-05 12:02:03,356 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-05 12:02:03,356 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK.
2024-12-05 12:02:03,357 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-05 12:02:03,357 main DEBUG OutputStream closed
2024-12-05 12:02:03,357 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-05 12:02:03,358 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-05 12:02:03,358 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK
2024-12-05 12:02:03,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-05 12:02:03,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-05 12:02:03,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-05 12:02:03,512 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-05 12:02:03,513 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-05 12:02:03,514 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-05 12:02:03,515 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-05 12:02:03,515 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-05 12:02:03,516 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-05 12:02:03,516 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-05 12:02:03,517 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-05 12:02:03,517 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-05 12:02:03,518 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-05 12:02:03,518 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-05 12:02:03,519 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-05 12:02:03,520 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-05 12:02:03,521 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-05 12:02:03,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-05 12:02:03,526 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05 12:02:03,526 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null
2024-12-05 12:02:03,527 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-05 12:02:03,528 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK.
2024-12-05T12:02:03,547 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-05 12:02:03,551 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-05 12:02:03,552 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-05T12:02:04,072 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201
2024-12-05T12:02:04,073 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins
2024-12-05T12:02:04,073 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins
2024-12-05T12:02:04,169 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-05T12:02:04,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-05T12:02:04,466 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7, deleteOnExit=true
2024-12-05T12:02:04,467 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-05T12:02:04,469 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/test.cache.data in system properties and HBase conf
2024-12-05T12:02:04,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.tmp.dir in system properties and HBase conf
2024-12-05T12:02:04,470 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir in system properties and HBase conf
2024-12-05T12:02:04,471 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-05T12:02:04,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-05T12:02:04,472 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-05T12:02:04,573 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-05T12:02:04,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:02:04,578 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-05T12:02:04,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-05T12:02:04,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:02:04,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-05T12:02:04,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-05T12:02:04,581 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-05T12:02:04,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:02:04,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-05T12:02:04,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/nfs.dump.dir in system properties and HBase conf
2024-12-05T12:02:04,583 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir in system properties and HBase conf
2024-12-05T12:02:04,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-05T12:02:04,584 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-05T12:02:04,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-05T12:02:05,838 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-05T12:02:05,952 INFO [Time-limited test {}] log.Log(170): Logging initialized @3710ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-05T12:02:06,066 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:02:06,158 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:02:06,195 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:02:06,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:02:06,199 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:02:06,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:02:06,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72770802{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:02:06,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15a5d53b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:02:06,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@150ffd7b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-44819-hadoop-hdfs-3_4_1-tests_jar-_-any-7723982957051882957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:02:06,522 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}
2024-12-05T12:02:06,523 INFO [Time-limited test {}] server.Server(415): Started @4282ms
2024-12-05T12:02:07,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:02:07,158 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:02:07,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:02:07,163 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:02:07,163 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:02:07,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8da11ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:02:07,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b6c5d7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:02:07,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22e5dde{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-44277-hadoop-hdfs-3_4_1-tests_jar-_-any-15788416347209220565/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:02:07,281 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}
2024-12-05T12:02:07,281 INFO [Time-limited test {}] server.Server(415): Started @5041ms
2024-12-05T12:02:07,386 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:02:07,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:02:07,557 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:02:07,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:02:07,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:02:07,570 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-05T12:02:07,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@428a9356{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:02:07,572 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8c924b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:02:07,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70601096{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-36661-hadoop-hdfs-3_4_1-tests_jar-_-any-12032096988262442049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:02:07,694 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}
2024-12-05T12:02:07,695 INFO [Time-limited test {}] server.Server(415): Started @5454ms
2024-12-05T12:02:07,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:02:07,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-05T12:02:07,792 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-05T12:02:07,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-05T12:02:07,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-05T12:02:07,797 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-05T12:02:07,803 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33104ee0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE}
2024-12-05T12:02:07,804 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49217e61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-05T12:02:07,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1865ebea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-45127-hadoop-hdfs-3_4_1-tests_jar-_-any-5092294400049701377/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:02:07,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}
2024-12-05T12:02:07,911 INFO [Time-limited test {}] server.Server(415): Started @5671ms
2024-12-05T12:02:07,914 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-05T12:02:08,719 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,719 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,719 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,719 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,755 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:02:08,757 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:02:08,810 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12494cff14e3a36a with lease ID 0xcef35cedff1af5e9: Processing first storage report for DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c from datanode DatanodeRegistration(127.0.0.1:39603, datanodeUuid=1f0bfc6e-34af-4444-98e7-cd02286dedfa, infoPort=40273, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,811 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12494cff14e3a36a with lease ID 0xcef35cedff1af5e9: from storage DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c node DatanodeRegistration(127.0.0.1:39603, datanodeUuid=1f0bfc6e-34af-4444-98e7-cd02286dedfa, infoPort=40273, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d424d82c244abd8 with lease ID 0xcef35cedff1af5e8: Processing first storage report for DS-ee047669-e603-4685-b719-a5ac8b615655 from datanode DatanodeRegistration(127.0.0.1:37713, datanodeUuid=7003692e-dcbf-496a-a497-c6b12c3641e7, infoPort=46789, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d424d82c244abd8 with lease ID 0xcef35cedff1af5e8: from storage DS-ee047669-e603-4685-b719-a5ac8b615655 node DatanodeRegistration(127.0.0.1:37713, datanodeUuid=7003692e-dcbf-496a-a497-c6b12c3641e7, infoPort=46789, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12494cff14e3a36a with lease ID 0xcef35cedff1af5e9: Processing first storage report for DS-d8736375-c2c1-4747-a7b2-3259cef2c977 from datanode DatanodeRegistration(127.0.0.1:39603, datanodeUuid=1f0bfc6e-34af-4444-98e7-cd02286dedfa, infoPort=40273, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12494cff14e3a36a with lease ID 0xcef35cedff1af5e9: from storage DS-d8736375-c2c1-4747-a7b2-3259cef2c977 node DatanodeRegistration(127.0.0.1:39603, datanodeUuid=1f0bfc6e-34af-4444-98e7-cd02286dedfa, infoPort=40273, infoSecurePort=0, ipcPort=37503, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d424d82c244abd8 with lease ID 0xcef35cedff1af5e8: Processing first storage report for DS-b81dda48-3673-4217-81fd-cf0299739f68 from datanode DatanodeRegistration(127.0.0.1:37713, datanodeUuid=7003692e-dcbf-496a-a497-c6b12c3641e7, infoPort=46789, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d424d82c244abd8 with lease ID 0xcef35cedff1af5e8: from storage DS-b81dda48-3673-4217-81fd-cf0299739f68 node DatanodeRegistration(127.0.0.1:37713, datanodeUuid=7003692e-dcbf-496a-a497-c6b12c3641e7, infoPort=46789, infoSecurePort=0, ipcPort=41019, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,921 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,921 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338/current, will proceed with Du for space computation calculation,
2024-12-05T12:02:08,951 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-05T12:02:08,956 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4006141e669143b0 with lease ID 0xcef35cedff1af5ea: Processing first storage report for DS-629f3698-fec0-408e-9eb6-41b26b8f31e2 from datanode DatanodeRegistration(127.0.0.1:35565, datanodeUuid=bda4ce52-ded1-49f3-a81c-39967248aef2, infoPort=35055, infoSecurePort=0, ipcPort=44125, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4006141e669143b0 with lease ID 0xcef35cedff1af5ea: from storage DS-629f3698-fec0-408e-9eb6-41b26b8f31e2 node DatanodeRegistration(127.0.0.1:35565, datanodeUuid=bda4ce52-ded1-49f3-a81c-39967248aef2, infoPort=35055, infoSecurePort=0, ipcPort=44125, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,957 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4006141e669143b0 with lease ID 0xcef35cedff1af5ea: Processing first storage report for DS-074d3e34-97c7-4988-ab62-1874e04938a1 from datanode DatanodeRegistration(127.0.0.1:35565, datanodeUuid=bda4ce52-ded1-49f3-a81c-39967248aef2, infoPort=35055, infoSecurePort=0, ipcPort=44125, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338)
2024-12-05T12:02:08,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4006141e669143b0 with lease ID 0xcef35cedff1af5ea: from storage DS-074d3e34-97c7-4988-ab62-1874e04938a1 node DatanodeRegistration(127.0.0.1:35565, datanodeUuid=bda4ce52-ded1-49f3-a81c-39967248aef2, infoPort=35055, infoSecurePort=0, ipcPort=44125, storageInfo=lv=-57;cid=testClusterID;nsid=1372580816;c=1733400125338), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-05T12:02:08,984 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201
2024-12-05T12:02:09,076 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/zookeeper_0, clientPort=49240, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-05T12:02:09,086 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49240
2024-12-05T12:02:09,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:09,101 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:09,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:02:09,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:02:09,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741825_1001 (size=7)
2024-12-05T12:02:09,795 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 with version=8
2024-12-05T12:02:09,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/hbase-staging
2024-12-05T12:02:09,884 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-05T12:02:10,119 INFO [Time-limited test {}] client.ConnectionUtils(128): master/80666a11a09f:0 server-side Connection retries=45
2024-12-05T12:02:10,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,129 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:02:10,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:02:10,295 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-05T12:02:10,365 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-05T12:02:10,377 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-05T12:02:10,383 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:02:10,420 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 120525 (auto-detected)
2024-12-05T12:02:10,422 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-05T12:02:10,449 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34947
2024-12-05T12:02:10,484 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34947 connecting to ZooKeeper ensemble=127.0.0.1:49240
2024-12-05T12:02:10,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349470x0, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:02:10,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34947-0x101a6a890970000 connected
2024-12-05T12:02:10,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:02:10,704 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300, hbase.cluster.distributed=false
2024-12-05T12:02:10,752 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:02:10,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34947
2024-12-05T12:02:10,758 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34947
2024-12-05T12:02:10,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34947
2024-12-05T12:02:10,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34947
2024-12-05T12:02:10,767 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34947
2024-12-05T12:02:10,884 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/80666a11a09f:0 server-side Connection retries=45
2024-12-05T12:02:10,886 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,886 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,886 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:02:10,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,887 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:02:10,891 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:02:10,894 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:02:10,895 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42913
2024-12-05T12:02:10,897 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42913 connecting to ZooKeeper ensemble=127.0.0.1:49240
2024-12-05T12:02:10,899 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,903 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,921 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429130x0, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:02:10,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429130x0, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:02:10,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42913-0x101a6a890970001 connected
2024-12-05T12:02:10,928 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:02:10,934 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-05T12:02:10,936 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:02:10,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:02:10,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-05T12:02:10,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42913
2024-12-05T12:02:10,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42913
2024-12-05T12:02:10,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-05T12:02:10,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42913
2024-12-05T12:02:10,958 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/80666a11a09f:0 server-side Connection retries=45
2024-12-05T12:02:10,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,959 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:02:10,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:10,959 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:02:10,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:02:10,960 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:02:10,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37629
2024-12-05T12:02:10,963 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37629 connecting to ZooKeeper ensemble=127.0.0.1:49240
2024-12-05T12:02:10,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:10,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376290x0, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:02:10,980 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:376290x0, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:02:10,981 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37629-0x101a6a890970002 connected
2024-12-05T12:02:10,981 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:02:10,982 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-05T12:02:10,984 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:02:10,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:02:10,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37629
2024-12-05T12:02:10,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37629
2024-12-05T12:02:10,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37629
2024-12-05T12:02:10,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37629
2024-12-05T12:02:10,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37629
2024-12-05T12:02:11,013 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/80666a11a09f:0 server-side Connection retries=45
2024-12-05T12:02:11,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:11,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:11,013 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-05T12:02:11,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-05T12:02:11,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-05T12:02:11,014 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-05T12:02:11,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-05T12:02:11,015 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42305
2024-12-05T12:02:11,017 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42305 connecting to ZooKeeper ensemble=127.0.0.1:49240
2024-12-05T12:02:11,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:11,021 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:11,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423050x0, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-05T12:02:11,035 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-05T12:02:11,035 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42305-0x101a6a890970003 connected
2024-12-05T12:02:11,036 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-05T12:02:11,038 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-05T12:02:11,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-05T12:02:11,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-05T12:02:11,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42305
2024-12-05T12:02:11,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42305
2024-12-05T12:02:11,059 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42305
2024-12-05T12:02:11,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42305
2024-12-05T12:02:11,061 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42305
2024-12-05T12:02:11,081 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;80666a11a09f:34947
2024-12-05T12:02:11,083 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/80666a11a09f,34947,1733400129965
2024-12-05T12:02:11,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,097 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,100 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/80666a11a09f,34947,1733400129965
2024-12-05T12:02:11,129 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:02:11,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:02:11,130 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:02:11,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:02:11,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-05T12:02:11,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:02:11,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-05T12:02:11,132 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-05T12:02:11,134 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/80666a11a09f,34947,1733400129965 from backup master directory
2024-12-05T12:02:11,146 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/80666a11a09f,34947,1733400129965
2024-12-05T12:02:11,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-05T12:02:11,148 WARN [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-05T12:02:11,148 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=80666a11a09f,34947,1733400129965
2024-12-05T12:02:11,151 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-05T12:02:11,153 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-05T12:02:11,217 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/hbase.id] with ID: af3bc8f5-5d92-4279-9ca6-4b73c59b8f54
2024-12-05T12:02:11,217 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.tmp/hbase.id
2024-12-05T12:02:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:02:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:02:11,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741826_1002 (size=42)
2024-12-05T12:02:11,239 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.tmp/hbase.id]:[hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/hbase.id]
2024-12-05T12:02:11,293 INFO [master/80666a11a09f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-05T12:02:11,299 INFO
[master/80666a11a09f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-05T12:02:11,319 INFO [master/80666a11a09f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-05T12:02:11,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:11,329 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:11,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:11,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:11,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:02:11,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:02:11,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741827_1003 (size=196) 2024-12-05T12:02:11,380 INFO [master/80666a11a09f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:02:11,383 INFO [master/80666a11a09f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-05T12:02:11,401 DEBUG 
[master/80666a11a09f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-05T12:02:11,407 INFO [master/80666a11a09f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:02:11,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741828_1004 (size=1189) 2024-12-05T12:02:11,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741828_1004 (size=1189) 2024-12-05T12:02:11,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741828_1004 (size=1189) 2024-12-05T12:02:11,479 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/data/master/store 2024-12-05T12:02:11,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:02:11,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:02:11,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741829_1005 (size=34) 2024-12-05T12:02:11,573 INFO [master/80666a11a09f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
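[editor's note] The StoreHotnessProtector message above names the relevant property directly (hbase.region.store.parallel.put.limit). A minimal sketch of enabling it from configuration code, assuming a standard HBase client Configuration; the value 10 is arbitrary and purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HotnessProtectorSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per the log message: any value > 0 enables StoreHotnessProtector,
        // capping parallel puts per store under heavy write pressure.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);
        System.out.println(conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }

The same key can of course be set in hbase-site.xml instead; with the default of 0 the protector stays disabled, as logged above.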
2024-12-05T12:02:11,576 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:11,577 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T12:02:11,577 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:02:11,577 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:02:11,578 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T12:02:11,579 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:02:11,579 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:02:11,580 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733400131577Disabling compacts and flushes for region at 1733400131577Disabling writes for close at 1733400131579 (+2 ms)Writing region close event to WAL at 1733400131579Closed at 1733400131579 2024-12-05T12:02:11,582 WARN [master/80666a11a09f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/data/master/store/.initializing 2024-12-05T12:02:11,582 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965 2024-12-05T12:02:11,591 INFO [master/80666a11a09f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:02:11,610 INFO [master/80666a11a09f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80666a11a09f%2C34947%2C1733400129965, suffix=, logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965, archiveDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/oldWALs, maxLogs=10 2024-12-05T12:02:11,643 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616, exclude list is [], retry=0 2024-12-05T12:02:11,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK] 
2024-12-05T12:02:11,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK] 2024-12-05T12:02:11,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK] 2024-12-05T12:02:11,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-05T12:02:11,698 INFO [master/80666a11a09f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 2024-12-05T12:02:11,698 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40273:40273),(127.0.0.1/127.0.0.1:35055:35055),(127.0.0.1/127.0.0.1:46789:46789)] 2024-12-05T12:02:11,699 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:02:11,699 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:11,703 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,705 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-05T12:02:11,775 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:11,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:11,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-05T12:02:11,784 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:11,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:11,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-05T12:02:11,791 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:11,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:11,793 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-05T12:02:11,803 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:11,804 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:11,805 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,810 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,812 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,819 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,819 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,824 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-05T12:02:11,829 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-05T12:02:11,835 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:11,836 INFO [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68701524, jitterRate=0.023732483386993408}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:02:11,843 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733400131718Initializing all the Stores at 1733400131720 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400131721 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400131722 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400131722Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400131722Cleaning up temporary data from old regions at 1733400131819 (+97 ms)Region opened successfully at 1733400131843 (+24 ms) 2024-12-05T12:02:11,847 INFO [master/80666a11a09f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-05T12:02:11,887 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1275590d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=80666a11a09f/172.17.0.2:0 2024-12-05T12:02:11,920 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-05T12:02:11,932 INFO [master/80666a11a09f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-05T12:02:11,932 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-05T12:02:11,935 INFO [master/80666a11a09f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-05T12:02:11,936 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-05T12:02:11,942 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-05T12:02:11,942 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-05T12:02:11,966 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-05T12:02:11,975 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-05T12:02:12,017 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-05T12:02:12,020 INFO [master/80666a11a09f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-05T12:02:12,022 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-05T12:02:12,029 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-05T12:02:12,031 INFO [master/80666a11a09f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-05T12:02:12,035 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-05T12:02:12,046 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-05T12:02:12,047 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-05T12:02:12,054 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-05T12:02:12,072 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-05T12:02:12,079 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:02:12,092 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,092 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,092 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,096 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=80666a11a09f,34947,1733400129965, sessionid=0x101a6a890970000, setting cluster-up flag (Was=false) 2024-12-05T12:02:12,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,121 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
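[editor's note] The watcher traffic above ("Set watcher on znode that does not yet exist", later followed by NodeCreated events for /hbase/running and /hbase/master) follows the standard ZooKeeper pattern: an exists() call registers a watch even when the znode is absent, so its creation fires a one-shot event. A minimal sketch against the plain ZooKeeper client API; the quorum string matches the log, the session timeout and class name are illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49240", 30000, (WatchedEvent event) -> {
          // Fires once with e.g. type=NodeCreated, path=/hbase/running when the
          // znode appears, mirroring the ZKWatcher events logged above.
          System.out.println(event.getType() + " " + event.getPath());
        });
        // exists() returns null for a missing znode but still registers the watch,
        // which is what "Set watcher on znode that does not yet exist" refers to.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("znode present: " + (stat != null));
      }
    }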
2024-12-05T12:02:12,146 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-05T12:02:12,148 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=80666a11a09f,34947,1733400129965 2024-12-05T12:02:12,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,271 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:12,296 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-05T12:02:12,297 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=80666a11a09f,34947,1733400129965 2024-12-05T12:02:12,305 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-05T12:02:12,342 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-05T12:02:12,347 INFO [master/80666a11a09f:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:12,347 INFO [master/80666a11a09f:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-05T12:02:12,365 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(746): ClusterId : af3bc8f5-5d92-4279-9ca6-4b73c59b8f54 2024-12-05T12:02:12,366 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(746): ClusterId : af3bc8f5-5d92-4279-9ca6-4b73c59b8f54 2024-12-05T12:02:12,366 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(746): ClusterId : af3bc8f5-5d92-4279-9ca6-4b73c59b8f54 2024-12-05T12:02:12,369 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:02:12,369 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:02:12,369 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-05T12:02:12,389 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:02:12,389 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:02:12,389 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-05T12:02:12,390 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:02:12,390 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:02:12,390 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-05T12:02:12,396 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-05T12:02:12,407 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:02:12,407 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:02:12,407 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-05T12:02:12,408 DEBUG [RS:2;80666a11a09f:42305 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ef8e64b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=80666a11a09f/172.17.0.2:0 2024-12-05T12:02:12,408 DEBUG [RS:1;80666a11a09f:37629 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683bfde0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=80666a11a09f/172.17.0.2:0 2024-12-05T12:02:12,408 INFO [master/80666a11a09f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-05T12:02:12,408 DEBUG [RS:0;80666a11a09f:42913 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a5c5bb5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=80666a11a09f/172.17.0.2:0 2024-12-05T12:02:12,418 INFO [master/80666a11a09f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-05T12:02:12,424 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;80666a11a09f:42305 2024-12-05T12:02:12,423 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 80666a11a09f,34947,1733400129965 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-05T12:02:12,426 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;80666a11a09f:37629 2024-12-05T12:02:12,427 INFO [RS:2;80666a11a09f:42305 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:02:12,427 INFO [RS:2;80666a11a09f:42305 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:02:12,427 INFO [RS:1;80666a11a09f:37629 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:02:12,427 INFO [RS:1;80666a11a09f:37629 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:02:12,428 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T12:02:12,429 INFO [RS:2;80666a11a09f:42305 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:12,429 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T12:02:12,429 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T12:02:12,430 INFO [RS:1;80666a11a09f:37629 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:12,430 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-05T12:02:12,430 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;80666a11a09f:42913 2024-12-05T12:02:12,431 INFO [RS:0;80666a11a09f:42913 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-05T12:02:12,431 INFO [RS:0;80666a11a09f:42913 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-05T12:02:12,431 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-05T12:02:12,432 INFO [RS:0;80666a11a09f:42913 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:12,432 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-05T12:02:12,432 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=37629, startcode=1733400130958 2024-12-05T12:02:12,432 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=42305, startcode=1733400131012 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/80666a11a09f:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:02:12,433 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=42913, startcode=1733400130845 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/80666a11a09f:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/80666a11a09f:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/80666a11a09f:0, corePoolSize=5, maxPoolSize=5 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/80666a11a09f:0, corePoolSize=10, maxPoolSize=10 2024-12-05T12:02:12,433 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,434 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/80666a11a09f:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:02:12,434 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,440 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733400162440 2024-12-05T12:02:12,440 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:02:12,441 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-05T12:02:12,442 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-05T12:02:12,443 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-05T12:02:12,445 DEBUG [RS:0;80666a11a09f:42913 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:02:12,445 DEBUG [RS:1;80666a11a09f:37629 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:02:12,445 DEBUG [RS:2;80666a11a09f:42305 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-05T12:02:12,447 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-05T12:02:12,447 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:12,447 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-05T12:02:12,447 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T12:02:12,448 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-05T12:02:12,448 INFO [master/80666a11a09f:0:becomeActiveMaster {}] 
cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-05T12:02:12,449 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-05T12:02:12,455 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-05T12:02:12,456 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-05T12:02:12,457 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-05T12:02:12,467 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-05T12:02:12,467 INFO [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-05T12:02:12,481 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.large.0-1733400132468,5,FailOnTimeoutGroup]
2024-12-05T12:02:12,487 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34575, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-12-05T12:02:12,487 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50637, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-05T12:02:12,487 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49563, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-05T12:02:12,495 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-05T12:02:12,500 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-05T12:02:12,501 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-05T12:02:12,501 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.small.0-1733400132483,5,FailOnTimeoutGroup]
2024-12-05T12:02:12,502 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-05T12:02:12,502 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-05T12:02:12,504 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-05T12:02:12,504 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
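[Editor's note] The ServerNotRunningYetException traces above are the expected startup race: each region server calls reportForDuty before the master's RPC services are open, and, as the records that follow show, it simply backs off and retries. A minimal, generic sketch of that retry-until-ready pattern in plain Java (class and exception names here are hypothetical; this is not HBase's internal code):

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/** Generic "retry until the remote service is up" loop, analogous to the reportForDuty retries. */
public final class StartupRetry {
    /** Hypothetical stand-in for a "server not running yet"-style transient failure. */
    public static class NotRunningYetException extends Exception {
        public NotRunningYetException(String msg) { super(msg); }
    }

    public static <T> T callWithRetry(Callable<T> call, long sleepMillis, int maxAttempts)
            throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                return call.call();
            } catch (NotRunningYetException e) {
                if (attempt >= maxAttempts) {
                    throw e;                               // give up after maxAttempts
                }
                TimeUnit.MILLISECONDS.sleep(sleepMillis);  // e.g. 100 ms, as in the log
            }
        }
    }
}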
2024-12-05T12:02:12,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:02:12,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:02:12,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741831_1007 (size=1321) 2024-12-05T12:02:12,512 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-05T12:02:12,513 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:12,532 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T12:02:12,532 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T12:02:12,532 WARN [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T12:02:12,532 WARN [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-05T12:02:12,532 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-05T12:02:12,532 WARN [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
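[Editor's note] The hbase:meta descriptor logged above (families info, ns, rep_barrier and table with ROWCOL bloom filters, IN_MEMORY, ROW_INDEX_V1 block encoding and an 8 KB block size) uses the same column-family attributes that the public client API exposes. A hedged sketch of declaring a user table with comparable settings; the table and family here are hypothetical, and this is not how the master bootstraps meta:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Column family mirroring the attributes seen in the logged meta descriptor.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build();
            TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo_table"))   // hypothetical table name
                .setColumnFamily(info)
                .build();
            admin.createTable(table);
        }
    }
}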
2024-12-05T12:02:12,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:02:12,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:02:12,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741832_1008 (size=32) 2024-12-05T12:02:12,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:12,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:02:12,587 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:02:12,587 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:12,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:12,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:02:12,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:02:12,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:12,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:12,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:02:12,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:02:12,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:12,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:12,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:02:12,605 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:02:12,605 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:12,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:12,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-12-05T12:02:12,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740 2024-12-05T12:02:12,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740 2024-12-05T12:02:12,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:02:12,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:02:12,621 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:02:12,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:02:12,634 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=42913, startcode=1733400130845 2024-12-05T12:02:12,634 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=42305, startcode=1733400131012 2024-12-05T12:02:12,634 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(2659): reportForDuty to master=80666a11a09f,34947,1733400129965 with port=37629, startcode=1733400130958 2024-12-05T12:02:12,636 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:12,636 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,637 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67519320, jitterRate=0.006116271018981934}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:02:12,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(517): Registering regionserver=80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,641 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733400132563Initializing all the Stores at 1733400132581 (+18 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400132581Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400132582 (+1 
ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400132582Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400132582Cleaning up temporary data from old regions at 1733400132619 (+37 ms)Region opened successfully at 1733400132641 (+22 ms) 2024-12-05T12:02:12,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T12:02:12,642 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T12:02:12,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T12:02:12,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T12:02:12,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T12:02:12,652 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:12,653 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37541 2024-12-05T12:02:12,653 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:02:12,655 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T12:02:12,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733400132641Disabling compacts and flushes for region at 1733400132641Disabling writes for close at 1733400132642 (+1 ms)Writing region close event to WAL at 1733400132655 (+13 ms)Closed at 1733400132655 2024-12-05T12:02:12,661 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:02:12,661 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-05T12:02:12,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:02:12,663 DEBUG [RS:0;80666a11a09f:42913 {}] zookeeper.ZKUtil(111): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,663 WARN [RS:0;80666a11a09f:42913 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
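[Editor's note] The znode records above (ZKUtil setting a watcher on /hbase/rs/... and the NodeChildrenChanged events) are the master's view of region servers registering ephemeral nodes under /hbase/rs. A minimal sketch of the underlying ZooKeeper pattern using the plain ZooKeeper client; the connection string and timeout are assumptions, and HBase's RegionServerTracker does considerably more:

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RsWatchSketch {
    public static void main(String[] args) throws Exception {
        // Assumed quorum address; the test above happens to use 127.0.0.1:49240.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49240", 30_000, event -> { });
        Watcher childWatcher = new Watcher() {
            @Override public void process(WatchedEvent event) {
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                    System.out.println("Region server list changed under " + event.getPath());
                }
            }
        };
        // One-shot child watch on the ephemeral region-server znodes, as under /hbase/rs.
        List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
        System.out.println("Currently registered: " + servers);
        zk.close();
    }
}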
2024-12-05T12:02:12,664 INFO [RS:0;80666a11a09f:42913 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:02:12,664 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(517): Registering regionserver=80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-05T12:02:12,677 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [80666a11a09f,42913,1733400130845] 2024-12-05T12:02:12,683 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,683 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] master.ServerManager(517): Registering regionserver=80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,683 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:12,683 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37541 2024-12-05T12:02:12,684 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:02:12,690 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:02:12,693 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:12,693 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37541 2024-12-05T12:02:12,693 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-05T12:02:12,698 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-05T12:02:12,712 INFO [RS:0;80666a11a09f:42913 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:02:12,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:02:12,731 INFO [RS:0;80666a11a09f:42913 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:02:12,734 DEBUG [RS:2;80666a11a09f:42305 {}] zookeeper.ZKUtil(111): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,734 WARN [RS:2;80666a11a09f:42305 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:02:12,734 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [80666a11a09f,42305,1733400131012] 2024-12-05T12:02:12,734 INFO [RS:2;80666a11a09f:42305 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:02:12,734 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [80666a11a09f,37629,1733400130958] 2024-12-05T12:02:12,735 DEBUG [RS:1;80666a11a09f:37629 {}] zookeeper.ZKUtil(111): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,735 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,735 WARN [RS:1;80666a11a09f:37629 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-05T12:02:12,735 INFO [RS:1;80666a11a09f:37629 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:02:12,735 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,743 INFO [RS:0;80666a11a09f:42913 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:02:12,744 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
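[Editor's note] Two of the values above come straight from configuration: the AsyncFSWALProvider chosen by WALFactory is the WAL provider setting, and globalMemStoreLimit=880 M is the global memstore fraction applied to the test JVM's heap. A hedged sketch of the corresponding settings via the Java Configuration API; the key names are to the best of my knowledge and should be verified against the HBase reference guide for your version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalAndMemstoreConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // WAL implementation; "asyncfs" selects the async provider seen above, "filesystem" the classic one.
        conf.set("hbase.wal.provider", "asyncfs");
        // Fraction of the region server heap usable by all memstores, plus its low-water mark.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        return conf;
    }
}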
2024-12-05T12:02:12,752 INFO [RS:2;80666a11a09f:42305 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:02:12,752 INFO [RS:1;80666a11a09f:37629 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-05T12:02:12,753 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:02:12,763 INFO [RS:1;80666a11a09f:37629 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:02:12,763 INFO [RS:0;80666a11a09f:42913 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:02:12,765 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,766 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,766 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,766 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,766 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,767 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,767 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/80666a11a09f:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:02:12,767 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,767 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,767 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,768 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,768 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,768 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,768 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 
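[Editor's note] Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" record above is a dedicated, named pool for one category of region server work (opening regions, log replay, snapshots, and so on). A generic JDK sketch of that pattern of small named fixed pools; the names and sizes simply mirror a few of the logged entries, and this is not HBase's own ExecutorService class:

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public final class NamedPoolsSketch {
    private static ThreadFactory named(String prefix) {
        AtomicInteger seq = new AtomicInteger();
        return r -> new Thread(r, prefix + "-" + seq.getAndIncrement());
    }

    public static Map<String, ExecutorService> build() {
        // Pool sizes copied from the corePoolSize/maxPoolSize values logged above.
        return Map.of(
            "RS_OPEN_REGION", Executors.newFixedThreadPool(1, named("RS_OPEN_REGION")),
            "RS_LOG_REPLAY_OPS", Executors.newFixedThreadPool(2, named("RS_LOG_REPLAY_OPS")),
            "RS_SNAPSHOT_OPERATIONS", Executors.newFixedThreadPool(3, named("RS_SNAPSHOT_OPERATIONS")));
    }
}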
2024-12-05T12:02:12,768 DEBUG [RS:0;80666a11a09f:42913 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:02:12,769 INFO [RS:2;80666a11a09f:42305 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-05T12:02:12,769 INFO [RS:1;80666a11a09f:37629 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:02:12,769 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,770 INFO [RS:2;80666a11a09f:42305 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-05T12:02:12,770 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,777 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:02:12,777 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-05T12:02:12,779 INFO [RS:1;80666a11a09f:37629 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:02:12,779 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,779 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,779 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,779 INFO [RS:2;80666a11a09f:42305 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
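[Editor's note] The "Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second" lines above reflect the pressure-aware compaction throughput controller's limits, which are configurable. A hedged sketch; the key names below are my best recollection of the relevant settings and are assumptions to check against your HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionThroughputSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names: upper and lower bounds for compaction write throughput, in bytes/second.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
    }
}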
2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/80666a11a09f:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:02:12,780 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,780 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/80666a11a09f:0, corePoolSize=2, maxPoolSize=2 2024-12-05T12:02:12,781 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:1;80666a11a09f:37629 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/80666a11a09f:0, corePoolSize=1, maxPoolSize=1 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:02:12,781 DEBUG [RS:2;80666a11a09f:42305 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/80666a11a09f:0, corePoolSize=3, maxPoolSize=3 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,782 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,42913,1733400130845-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,789 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,42305,1733400131012-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
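[Editor's note] The chore records above (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, CompactedHFilesCleaner every 120000 ms, and so on) are periodic background tasks scheduled by the region server's ChoreService. A generic sketch of the same scheduling idea with the JDK scheduler; it illustrates the pattern only, not HBase's ScheduledChore class:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Periods copied from the log: a 1 s compaction check and a 120 s compacted-file cleaner.
        chores.scheduleAtFixedRate(
            () -> System.out.println("CompactionChecker tick"), 1_000, 1_000, TimeUnit.MILLISECONDS);
        chores.scheduleAtFixedRate(
            () -> System.out.println("CompactedHFilesCleaner tick"), 120_000, 120_000, TimeUnit.MILLISECONDS);
    }
}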
2024-12-05T12:02:12,796 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,797 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,797 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,797 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,797 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,797 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,37629,1733400130958-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:02:12,824 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:02:12,828 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:02:12,828 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-05T12:02:12,829 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,42913,1733400130845-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,830 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,42305,1733400131012-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,830 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,37629,1733400130958-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,830 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,830 INFO [RS:2;80666a11a09f:42305 {}] regionserver.Replication(171): 80666a11a09f,42305,1733400131012 started 2024-12-05T12:02:12,830 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,830 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,831 INFO [RS:1;80666a11a09f:37629 {}] regionserver.Replication(171): 80666a11a09f,37629,1733400130958 started 2024-12-05T12:02:12,831 INFO [RS:0;80666a11a09f:42913 {}] regionserver.Replication(171): 80666a11a09f,42913,1733400130845 started 2024-12-05T12:02:12,849 WARN [80666a11a09f:34947 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-05T12:02:12,855 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-05T12:02:12,857 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1482): Serving as 80666a11a09f,37629,1733400130958, RpcServer on 80666a11a09f/172.17.0.2:37629, sessionid=0x101a6a890970002 2024-12-05T12:02:12,858 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:12,858 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1482): Serving as 80666a11a09f,42913,1733400130845, RpcServer on 80666a11a09f/172.17.0.2:42913, sessionid=0x101a6a890970001 2024-12-05T12:02:12,859 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:02:12,859 DEBUG [RS:0;80666a11a09f:42913 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,860 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,42913,1733400130845' 2024-12-05T12:02:12,860 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:02:12,860 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:02:12,860 DEBUG [RS:1;80666a11a09f:37629 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,860 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,37629,1733400130958' 2024-12-05T12:02:12,860 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:02:12,861 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
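[Editor's note] The "Procedure flush-table-proc starting" and ZKProcedureMemberRpcs records above show each region server joining the ZooKeeper-coordinated flush procedure; that machinery is what ultimately serves a client-requested table flush. A minimal client-side sketch using the public Admin API (the table name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushSketch {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("demo_table")); // flushes all memstores of the table
        }
    }
}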
2024-12-05T12:02:12,861 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1482): Serving as 80666a11a09f,42305,1733400131012, RpcServer on 80666a11a09f/172.17.0.2:42305, sessionid=0x101a6a890970003 2024-12-05T12:02:12,861 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-05T12:02:12,861 DEBUG [RS:2;80666a11a09f:42305 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,861 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,42305,1733400131012' 2024-12-05T12:02:12,861 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-05T12:02:12,862 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:02:12,862 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:02:12,863 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-05T12:02:12,864 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:02:12,864 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:02:12,864 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:02:12,864 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:02:12,864 DEBUG [RS:1;80666a11a09f:37629 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 80666a11a09f,37629,1733400130958 2024-12-05T12:02:12,864 DEBUG [RS:0;80666a11a09f:42913 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 80666a11a09f,42913,1733400130845 2024-12-05T12:02:12,864 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,42913,1733400130845' 2024-12-05T12:02:12,864 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,37629,1733400130958' 2024-12-05T12:02:12,864 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:02:12,864 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:02:12,865 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:02:12,866 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:02:12,867 DEBUG [RS:0;80666a11a09f:42913 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:02:12,867 INFO [RS:0;80666a11a09f:42913 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:02:12,867 INFO 
[RS:0;80666a11a09f:42913 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T12:02:12,869 DEBUG [RS:1;80666a11a09f:37629 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:02:12,869 INFO [RS:1;80666a11a09f:37629 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:02:12,869 INFO [RS:1;80666a11a09f:37629 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-05T12:02:12,872 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-05T12:02:12,873 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-05T12:02:12,873 DEBUG [RS:2;80666a11a09f:42305 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 80666a11a09f,42305,1733400131012 2024-12-05T12:02:12,873 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '80666a11a09f,42305,1733400131012' 2024-12-05T12:02:12,873 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-05T12:02:12,874 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-05T12:02:12,875 DEBUG [RS:2;80666a11a09f:42305 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-05T12:02:12,875 INFO [RS:2;80666a11a09f:42305 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-05T12:02:12,875 INFO [RS:2;80666a11a09f:42305 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
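[Editor's note] The "Quota support disabled" / "not starting space quota manager" records above mean RPC and space quotas are switched off in this test configuration. A hedged sketch of enabling them; I believe the switch is hbase.quota.enabled, but treat the key name as an assumption to verify:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class QuotaConfigSketch {
    public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // assumed key; enables the RPC/space quota managers
        return conf;
    }
}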
2024-12-05T12:02:12,973 INFO [RS:1;80666a11a09f:37629 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:02:12,974 INFO [RS:0;80666a11a09f:42913 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:02:12,976 INFO [RS:2;80666a11a09f:42305 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-05T12:02:12,976 INFO [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80666a11a09f%2C37629%2C1733400130958, suffix=, logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958, archiveDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs, maxLogs=32 2024-12-05T12:02:12,980 INFO [RS:2;80666a11a09f:42305 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80666a11a09f%2C42305%2C1733400131012, suffix=, logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42305,1733400131012, archiveDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs, maxLogs=32 2024-12-05T12:02:12,985 INFO [RS:0;80666a11a09f:42913 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80666a11a09f%2C42913%2C1733400130845, suffix=, logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42913,1733400130845, archiveDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs, maxLogs=32 2024-12-05T12:02:13,016 DEBUG [RS:1;80666a11a09f:37629 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958/80666a11a09f%2C37629%2C1733400130958.1733400132981, exclude list is [], retry=0 2024-12-05T12:02:13,016 DEBUG [RS:0;80666a11a09f:42913 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42913,1733400130845/80666a11a09f%2C42913%2C1733400130845.1733400132988, exclude list is [], retry=0 2024-12-05T12:02:13,021 DEBUG [RS:2;80666a11a09f:42305 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42305,1733400131012/80666a11a09f%2C42305%2C1733400131012.1733400132985, exclude list is [], retry=0 2024-12-05T12:02:13,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK] 2024-12-05T12:02:13,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK] 2024-12-05T12:02:13,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK] 2024-12-05T12:02:13,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK] 2024-12-05T12:02:13,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK] 2024-12-05T12:02:13,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK] 2024-12-05T12:02:13,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK] 2024-12-05T12:02:13,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK] 2024-12-05T12:02:13,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK] 2024-12-05T12:02:13,072 INFO [RS:0;80666a11a09f:42913 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42913,1733400130845/80666a11a09f%2C42913%2C1733400130845.1733400132988 2024-12-05T12:02:13,072 INFO [RS:2;80666a11a09f:42305 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,42305,1733400131012/80666a11a09f%2C42305%2C1733400131012.1733400132985 2024-12-05T12:02:13,072 INFO [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958/80666a11a09f%2C37629%2C1733400130958.1733400132981 2024-12-05T12:02:13,074 DEBUG [RS:0;80666a11a09f:42913 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35055:35055),(127.0.0.1/127.0.0.1:40273:40273),(127.0.0.1/127.0.0.1:46789:46789)] 2024-12-05T12:02:13,074 DEBUG [RS:2;80666a11a09f:42305 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35055:35055),(127.0.0.1/127.0.0.1:40273:40273),(127.0.0.1/127.0.0.1:46789:46789)] 2024-12-05T12:02:13,074 DEBUG [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35055:35055),(127.0.0.1/127.0.0.1:46789:46789),(127.0.0.1/127.0.0.1:40273:40273)] 2024-12-05T12:02:13,101 DEBUG [80666a11a09f:34947 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-05T12:02:13,107 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:02:13,113 INFO [80666a11a09f:34947 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:02:13,113 INFO [80666a11a09f:34947 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:02:13,113 INFO [80666a11a09f:34947 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:02:13,113 DEBUG [80666a11a09f:34947 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:02:13,121 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:02:13,130 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 80666a11a09f,37629,1733400130958, state=OPENING 2024-12-05T12:02:13,142 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-05T12:02:13,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:13,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:13,217 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:13,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:13,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,218 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,220 DEBUG [PEWorker-3 
{}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-05T12:02:13,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:02:13,401 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:02:13,403 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49775, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:02:13,416 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-05T12:02:13,416 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-05T12:02:13,417 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-05T12:02:13,420 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=80666a11a09f%2C37629%2C1733400130958.meta, suffix=.meta, logDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958, archiveDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs, maxLogs=32 2024-12-05T12:02:13,434 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958/80666a11a09f%2C37629%2C1733400130958.meta.1733400133421.meta, exclude list is [], retry=0 2024-12-05T12:02:13,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK] 2024-12-05T12:02:13,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK] 2024-12-05T12:02:13,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK] 2024-12-05T12:02:13,442 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/WALs/80666a11a09f,37629,1733400130958/80666a11a09f%2C37629%2C1733400130958.meta.1733400133421.meta 2024-12-05T12:02:13,443 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35055:35055),(127.0.0.1/127.0.0.1:40273:40273),(127.0.0.1/127.0.0.1:46789:46789)] 2024-12-05T12:02:13,443 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:02:13,444 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-05T12:02:13,445 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:13,446 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-05T12:02:13,447 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-05T12:02:13,449 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
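The wal.AbstractFSWAL(613) entries above record the WAL settings each region server starts with (blocksize=256 MB, rollsize=128 MB, maxLogs=32), and the WALFactory line shows the AsyncFSWALProvider in use. As a rough, hedged illustration of where such numbers usually come from, the snippet below sets the standard WAL-related properties to values mirroring the log; assuming these exact properties were what produced this particular test's values is an inference, not something the log states.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Async WAL provider, matching "Instantiating WALProvider of type ... AsyncFSWALProvider".
            conf.set("hbase.wal.provider", "asyncfs");
            // WAL block size and roll multiplier: 256 MB * 0.5 = 128 MB, matching
            // "blocksize=256 MB, rollsize=128 MB" in the log.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Cap on un-archived WAL files per region server ("maxLogs=32").
            conf.setInt("hbase.regionserver.maxlogs", 32);
            long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
                * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
            System.out.println("expected WAL roll size = " + rollSize + " bytes");
        }
    }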
2024-12-05T12:02:13,455 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-05T12:02:13,456 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:13,456 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-05T12:02:13,456 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-05T12:02:13,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-05T12:02:13,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-05T12:02:13,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:13,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:13,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-05T12:02:13,464 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-05T12:02:13,464 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:13,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:13,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-05T12:02:13,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-05T12:02:13,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:13,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-05T12:02:13,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-05T12:02:13,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-05T12:02:13,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:13,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
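The compactions.CompactionConfiguration(183) entries above dump the effective compaction settings for every column family of hbase:meta: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2 (5.0 off-peak), and a 604800000 ms major-compaction period with 0.5 jitter. Purely as an illustrative sketch, those figures correspond to the standard compaction properties shown below; treating them as having been left at defaults in this test is an inference from the values, not something the log says explicitly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // "files [minFilesToCompact:3, maxFilesToCompact:10)"
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // "ratio 1.200000; off-peak ratio 5.000000"
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // "major period 604800000, major jitter 0.500000" (weekly, randomized +/- 50%)
            conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
            System.out.println("min/max files per compaction = "
                + conf.getInt("hbase.hstore.compaction.min", -1) + "/"
                + conf.getInt("hbase.hstore.compaction.max", -1));
        }
    }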
2024-12-05T12:02:13,471 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-05T12:02:13,473 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740 2024-12-05T12:02:13,476 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740 2024-12-05T12:02:13,479 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-05T12:02:13,479 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-05T12:02:13,481 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-05T12:02:13,484 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-05T12:02:13,487 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59608696, jitterRate=-0.11176121234893799}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-05T12:02:13,487 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-05T12:02:13,491 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733400133456Writing region info on filesystem at 1733400133456Initializing all the Stores at 1733400133458 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400133458Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400133459 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400133459Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400133459Cleaning up temporary data from old regions at 1733400133480 (+21 ms)Running coprocessor post-open hooks at 1733400133487 (+7 ms)Region opened successfully at 1733400133491 (+4 ms) 2024-12-05T12:02:13,499 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733400133391 2024-12-05T12:02:13,512 DEBUG [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-05T12:02:13,512 INFO [RS_OPEN_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-05T12:02:13,515 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:02:13,519 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 80666a11a09f,37629,1733400130958, state=OPEN 2024-12-05T12:02:13,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:02:13,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:02:13,529 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:02:13,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-05T12:02:13,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,529 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-05T12:02:13,530 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=80666a11a09f,37629,1733400130958 2024-12-05T12:02:13,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-05T12:02:13,536 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=80666a11a09f,37629,1733400130958 in 307 msec 2024-12-05T12:02:13,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-05T12:02:13,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 866 msec 2024-12-05T12:02:13,550 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-05T12:02:13,551 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-05T12:02:13,572 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:13,574 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:13,597 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:13,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48715, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:13,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2820 sec 2024-12-05T12:02:13,686 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733400133686, completionTime=-1 2024-12-05T12:02:13,690 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-05T12:02:13,690 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
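The client.ConnectionUtils entries just above show the master's internal client fetching the hbase:meta location from the connection registry and then opening a SIMPLE-authenticated ClientService connection to 80666a11a09f,37629. The sketch below is a hedged, client-side analogue using the public API; it is not the internal code path the master follows here, and the empty-row lookup is just an illustrative way to ask for the first (and in this cluster only) meta region.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ConnectionFactory resolves cluster metadata through the connection registry,
            // the same mechanism logged as "Start fetching meta region location from registry".
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Prints something comparable to the logged location:
                // region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1
                System.out.println(locator.getRegionLocation(new byte[0]));
            }
        }
    }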
2024-12-05T12:02:13,739 INFO [master/80666a11a09f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-05T12:02:13,740 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733400193740 2024-12-05T12:02:13,740 INFO [master/80666a11a09f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733400253740 2024-12-05T12:02:13,740 INFO [master/80666a11a09f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 49 msec 2024-12-05T12:02:13,742 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:02:13,750 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,750 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,750 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,753 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-80666a11a09f:34947, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,753 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,754 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:13,763 DEBUG [master/80666a11a09f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T12:02:13,789 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.641sec 2024-12-05T12:02:13,792 INFO [master/80666a11a09f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-05T12:02:13,794 INFO [master/80666a11a09f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-05T12:02:13,795 INFO [master/80666a11a09f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-05T12:02:13,795 INFO [master/80666a11a09f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
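The ProcedureExecutor entries a little above also time each step ("... OpenRegionProcedure ... in 307 msec", "... ASSIGN in 866 msec", "InitMetaProcedure ... in 1.2820 sec"), and the master reports completing initialization in 2.641sec. When digging through output like this it can help to pull those durations out mechanically; the small, self-contained parser below (plain JDK regex, sample lines paraphrased from the log) is only a convenience sketch for reading this log format, not an HBase API.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ProcedureDurationSketch {
        // Matches the tail of ProcedureExecutor "Finished ..." lines, which end in
        // either "in <n> msec" or "in <n.nnnn> sec".
        private static final Pattern FINISHED =
            Pattern.compile("Finished pid=(\\d+).*? in ([\\d.]+) (msec|sec)");

        public static void main(String[] args) {
            String[] sample = {
                "Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740 in 307 msec",
                "Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2820 sec"
            };
            for (String line : sample) {
                Matcher m = FINISHED.matcher(line);
                if (m.find()) {
                    double millis = Double.parseDouble(m.group(2))
                        * ("sec".equals(m.group(3)) ? 1000 : 1);
                    System.out.println("pid=" + m.group(1) + " took " + millis + " ms");
                }
            }
        }
    }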
2024-12-05T12:02:13,795 INFO [master/80666a11a09f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-05T12:02:13,796 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-05T12:02:13,797 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-05T12:02:13,821 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T12:02:13,821 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 80666a11a09f,34947,1733400129965 2024-12-05T12:02:13,824 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@25ca3d9d 2024-12-05T12:02:13,826 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T12:02:13,829 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41843, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T12:02:13,836 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-05T12:02:13,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-05T12:02:13,849 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:02:13,850 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:13,852 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-05T12:02:13,853 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:02:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:02:13,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@412aa6f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:13,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741837_1013 (size=349) 2024-12-05T12:02:13,903 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-05T12:02:13,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741837_1013 (size=349) 2024-12-05T12:02:13,903 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-05T12:02:13,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741837_1013 (size=349) 2024-12-05T12:02:13,907 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 02119c1ced80736cbd0ab02ca19ee30c, NAME => 'hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:13,908 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:13,911 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:13,928 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:13,930 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:13,931 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:13,931 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33e325d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:13,931 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:13,934 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:13,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741838_1014 (size=36) 2024-12-05T12:02:13,940 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to 
blk_1073741838_1014 (size=36) 2024-12-05T12:02:13,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741838_1014 (size=36) 2024-12-05T12:02:13,942 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:13,945 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17944895, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:13,946 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:13,949 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:13,950 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 02119c1ced80736cbd0ab02ca19ee30c, disabling compactions & flushes 2024-12-05T12:02:13,950 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:13,950 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:13,950 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. after waiting 0 ms 2024-12-05T12:02:13,950 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:13,950 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 
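The pid=4 CreateTableProcedure traced above builds 'hbase:acl' with a single 'l' family (VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => 8 KB), writing the filesystem layout first while the RegionOpenAndInit thread instantiates the region and immediately closes it again. For comparison only, the sketch below creates a table with an equivalent schema through the public Admin API; the table name "demo_acl_like" is invented for the example, and this is the client-facing route, not the master-internal procedure seen in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAclLikeTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // One column family 'l', mirroring the descriptor printed in the log:
                // VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'.
                admin.createTable(
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_acl_like"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
                            .setMaxVersions(1)
                            .setInMemory(true)
                            .setBlocksize(8 * 1024)
                            .build())
                        .build());
            }
        }
    }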
2024-12-05T12:02:13,950 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 02119c1ced80736cbd0ab02ca19ee30c: Waiting for close lock at 1733400133949Disabling compacts and flushes for region at 1733400133949Disabling writes for close at 1733400133950 (+1 ms)Writing region close event to WAL at 1733400133950Closed at 1733400133950 2024-12-05T12:02:13,955 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:02:13,956 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:13,956 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:13,962 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733400133956"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400133956"}]},"ts":"1733400133956"} 2024-12-05T12:02:13,970 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T12:02:13,974 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:02:13,977 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400133974"}]},"ts":"1733400133974"} 2024-12-05T12:02:13,977 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:13,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=80666a11a09f,34947,1733400129965 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
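Both "Minicluster is up" and "Starting mini mapreduce cluster..." come from hbase.HBaseTestingUtil, the harness driving everything in this log (one master, three region servers, with the MapReduce pieces being set up next). The sketch below follows the long-standing HBaseTestingUtility method names and assumes, without verifying against this 4.0.0-alpha snapshot, that they carry over unchanged to HBaseTestingUtil; a test of this shape is typically bootstrapped like this:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // ZooKeeper + HDFS + one master + three region servers, roughly matching
            // the RS:0 / RS:1 / RS:2 threads seen throughout this log.
            util.startMiniCluster(3);
            try {
                // ... exercise the cluster via util.getConnection() / util.getAdmin() ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }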
2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/test.cache.data in system properties and HBase conf 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.tmp.dir in system properties and HBase conf 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir in system properties and HBase conf 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-05T12:02:13,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-05T12:02:13,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/nfs.dump.dir in system properties and HBase conf 2024-12-05T12:02:13,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir in system properties and HBase conf 2024-12-05T12:02:13,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-05T12:02:13,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-05T12:02:13,984 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-05T12:02:13,984 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-05T12:02:13,985 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:02:13,994 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is 
on host 0 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:02:13,995 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:02:13,995 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:02:13,995 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:02:13,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:02:13,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=02119c1ced80736cbd0ab02ca19ee30c, ASSIGN}] 2024-12-05T12:02:14,002 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=02119c1ced80736cbd0ab02ca19ee30c, ASSIGN 2024-12-05T12:02:14,005 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=02119c1ced80736cbd0ab02ca19ee30c, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:02:14,160 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T12:02:14,161 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=02119c1ced80736cbd0ab02ca19ee30c, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:14,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=02119c1ced80736cbd0ab02ca19ee30c, ASSIGN because future has completed 2024-12-05T12:02:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741839_1015 (size=592039) 2024-12-05T12:02:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741839_1015 (size=592039) 2024-12-05T12:02:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741839_1015 (size=592039) 2024-12-05T12:02:14,175 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02119c1ced80736cbd0ab02ca19ee30c, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:02:14,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T12:02:14,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T12:02:14,243 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741840_1016 (size=1663647) 2024-12-05T12:02:14,355 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:02:14,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44573, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:02:14,400 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:14,400 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 02119c1ced80736cbd0ab02ca19ee30c, NAME => 'hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:02:14,401 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. service=AccessControlService 2024-12-05T12:02:14,401 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:02:14,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:14,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,402 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,407 INFO [StoreOpener-02119c1ced80736cbd0ab02ca19ee30c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,410 INFO [StoreOpener-02119c1ced80736cbd0ab02ca19ee30c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02119c1ced80736cbd0ab02ca19ee30c columnFamilyName l 2024-12-05T12:02:14,410 DEBUG [StoreOpener-02119c1ced80736cbd0ab02ca19ee30c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:14,412 INFO [StoreOpener-02119c1ced80736cbd0ab02ca19ee30c-1 {}] regionserver.HStore(327): Store=02119c1ced80736cbd0ab02ca19ee30c/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:14,412 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,422 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,423 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,424 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,424 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,429 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,434 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:14,436 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 02119c1ced80736cbd0ab02ca19ee30c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60664631, jitterRate=-0.09602655470371246}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:14,436 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:02:14,438 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region 
open journal for 02119c1ced80736cbd0ab02ca19ee30c: Running coprocessor pre-open hook at 1733400134403Writing region info on filesystem at 1733400134403Initializing all the Stores at 1733400134406 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733400134406Cleaning up temporary data from old regions at 1733400134424 (+18 ms)Running coprocessor post-open hooks at 1733400134436 (+12 ms)Region opened successfully at 1733400134438 (+2 ms) 2024-12-05T12:02:14,442 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., pid=6, masterSystemTime=1733400134355 2024-12-05T12:02:14,449 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:14,449 INFO [RS_OPEN_PRIORITY_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:02:14,452 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=02119c1ced80736cbd0ab02ca19ee30c, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:14,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02119c1ced80736cbd0ab02ca19ee30c, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:14,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-05T12:02:14,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 02119c1ced80736cbd0ab02ca19ee30c, server=80666a11a09f,42305,1733400131012 in 287 msec 2024-12-05T12:02:14,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-05T12:02:14,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=02119c1ced80736cbd0ab02ca19ee30c, ASSIGN in 470 msec 2024-12-05T12:02:14,480 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:02:14,481 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400134480"}]},"ts":"1733400134480"} 2024-12-05T12:02:14,486 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-05T12:02:14,489 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:02:14,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 649 msec 2024-12-05T12:02:14,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:02:15,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-05T12:02:15,010 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-05T12:02:15,031 DEBUG [master/80666a11a09f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-05T12:02:15,032 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-05T12:02:15,033 INFO [master/80666a11a09f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=80666a11a09f,34947,1733400129965-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-05T12:02:16,109 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:16,226 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:16,540 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T12:02:16,540 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
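[editor's note] The records above show the client polling MasterRpcServices ("Checking to see if procedure is done pid=4") until the CreateTableProcedure for hbase:acl finishes ("Operation: CREATE, Table Name: hbase:acl completed"). A minimal, hypothetical sketch of confirming the same outcome through the public Admin API follows; the class name and connection settings are illustrative placeholders, not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CheckAclTable {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster; the ZooKeeper quorum below is a placeholder.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName acl = TableName.valueOf("hbase:acl");
      // Mirrors what the log reports: the table exists and ends up ENABLED in hbase:meta.
      System.out.println("exists:  " + admin.tableExists(acl));
      System.out.println("enabled: " + admin.isTableEnabled(acl));
    }
  }
}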
2024-12-05T12:02:16,541 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T12:02:16,563 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T12:02:16,563 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T12:02:16,563 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T12:02:16,564 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf9b147{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE} 2024-12-05T12:02:16,565 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4846b1a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T12:02:16,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T12:02:16,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T12:02:16,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T12:02:16,610 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:16,619 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59ddc6ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE} 2024-12-05T12:02:16,620 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3b81b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T12:02:16,755 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-05T12:02:16,756 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-05T12:02:16,756 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T12:02:16,758 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T12:02:16,836 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:17,248 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:17,705 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:17,748 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@628fba2{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-45237-hadoop-yarn-common-3_4_1_jar-_-any-8695859235935138971/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-05T12:02:17,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2922a925{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-39833-hadoop-yarn-common-3_4_1_jar-_-any-2079122063325657013/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-05T12:02:17,749 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78b8974f{HTTP/1.1, (http/1.1)}{localhost:39833} 2024-12-05T12:02:17,749 INFO [Time-limited test {}] server.Server(415): Started @15509ms 2024-12-05T12:02:17,749 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@adf2375{HTTP/1.1, (http/1.1)}{localhost:45237} 2024-12-05T12:02:17,749 INFO [Thread-383 {}] server.Server(415): Started @15509ms 2024-12-05T12:02:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741841_1017 (size=5) 2024-12-05T12:02:17,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741841_1017 (size=5) 2024-12-05T12:02:17,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741841_1017 (size=5) 2024-12-05T12:02:18,599 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-05T12:02:18,604 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:18,647 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-05T12:02:18,653 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T12:02:18,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T12:02:18,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T12:02:18,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T12:02:18,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:18,702 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4659cb26{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE} 2024-12-05T12:02:18,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26e39968{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T12:02:18,769 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-05T12:02:18,770 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T12:02:18,770 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-05T12:02:18,770 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T12:02:18,806 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:18,840 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:18,978 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:02:19,004 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:19,018 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@74d24973{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-45293-hadoop-yarn-common-3_4_1_jar-_-any-2454670580872397858/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T12:02:19,019 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31d1c277{HTTP/1.1, (http/1.1)}{localhost:45293} 2024-12-05T12:02:19,019 INFO [Time-limited test {}] server.Server(415): Started @16779ms 2024-12-05T12:02:19,082 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-05T12:02:19,084 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-05T12:02:19,355 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-05T12:02:19,359 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:19,380 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-05T12:02:19,381 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-05T12:02:19,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-05T12:02:19,413 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-05T12:02:19,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-05T12:02:19,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-05T12:02:19,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@67e1cde8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,AVAILABLE} 2024-12-05T12:02:19,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@514d5141{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-05T12:02:19,471 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-05T12:02:19,471 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-05T12:02:19,471 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-05T12:02:19,472 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-05T12:02:19,481 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:19,488 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:19,605 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-05T12:02:19,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@827304f{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/java.io.tmpdir/jetty-localhost-38623-hadoop-yarn-common-3_4_1_jar-_-any-5615241636066654144/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T12:02:19,624 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bf118dd{HTTP/1.1, (http/1.1)}{localhost:38623} 2024-12-05T12:02:19,625 INFO [Time-limited test {}] server.Server(415): Started @17384ms 2024-12-05T12:02:19,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-05T12:02:19,663 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:02:19,702 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=719, OpenFileDescriptor=780, MaxFileDescriptor=1048576, SystemLoadAverage=595, ProcessCount=11, AvailableMemoryMB=6312 2024-12-05T12:02:19,707 WARN [Time-limited test {}] 
hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-05T12:02:19,714 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T12:02:19,720 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 80666a11a09f,34947,1733400129965 2024-12-05T12:02:19,720 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3319ca7a 2024-12-05T12:02:19,721 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T12:02:19,724 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T12:02:19,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:02:19,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:19,732 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:02:19,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-05T12:02:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:02:19,738 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:02:19,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741842_1018 (size=458) 2024-12-05T12:02:19,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741842_1018 (size=458) 2024-12-05T12:02:19,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741842_1018 (size=458) 2024-12-05T12:02:19,763 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
f4bc653ea89173afedeec879d98060f1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:19,764 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 446097405ca8c47d8ea55b39855ba137, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:19,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741843_1019 (size=83) 2024-12-05T12:02:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741843_1019 (size=83) 2024-12-05T12:02:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741843_1019 (size=83) 2024-12-05T12:02:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741844_1020 (size=83) 2024-12-05T12:02:19,792 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 446097405ca8c47d8ea55b39855ba137, disabling compactions & flushes 2024-12-05T12:02:19,793 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 
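[editor's note] The create request logged above carries the full descriptor for testtb-testExportFileSystemStateWithSplitRegion: one MOB-enabled family 'cf' (MOB_THRESHOLD => '0', BLOOMFILTER => 'ROW', VERSIONS => '1'), the table-level 'hbase.store.file-tracker.impl' => 'DEFAULT' metadata, and two regions split at row key '1'. A hedged sketch of building an equivalent table from the Java client follows; this is not the test's actual code, and the connection setup is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobSplitTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' as described in the log: MOB enabled with threshold 0,
      // ROW bloom filter, a single version kept.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
          .setValue("hbase.store.file-tracker.impl", "DEFAULT")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setBloomFilterType(BloomType.ROW)
              .setMaxVersions(1)
              .build())
          .build();
      // One split point at '1' yields the two regions seen in the log:
      // ('' -> '1') and ('1' -> '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}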
2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. after waiting 0 ms 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:19,793 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 446097405ca8c47d8ea55b39855ba137: Waiting for close lock at 1733400139793Disabling compacts and flushes for region at 1733400139793Disabling writes for close at 1733400139793Writing region close event to WAL at 1733400139793Closed at 1733400139793 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing f4bc653ea89173afedeec879d98060f1, disabling compactions & flushes 2024-12-05T12:02:19,793 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:19,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741844_1020 (size=83) 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. after waiting 0 ms 2024-12-05T12:02:19,793 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:19,793 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 
2024-12-05T12:02:19,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741844_1020 (size=83) 2024-12-05T12:02:19,794 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for f4bc653ea89173afedeec879d98060f1: Waiting for close lock at 1733400139793Disabling compacts and flushes for region at 1733400139793Disabling writes for close at 1733400139793Writing region close event to WAL at 1733400139793Closed at 1733400139793 2024-12-05T12:02:19,796 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:02:19,796 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733400139796"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400139796"}]},"ts":"1733400139796"} 2024-12-05T12:02:19,796 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733400139796"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400139796"}]},"ts":"1733400139796"} 2024-12-05T12:02:19,831 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:02:19,833 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:02:19,833 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400139833"}]},"ts":"1733400139833"} 2024-12-05T12:02:19,837 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-05T12:02:19,837 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:02:19,839 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:02:19,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:02:19,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:02:19,840 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:02:19,840 DEBUG [PEWorker-2 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:02:19,840 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, ASSIGN}] 2024-12-05T12:02:19,843 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, ASSIGN 2024-12-05T12:02:19,843 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, ASSIGN 2024-12-05T12:02:19,844 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, ASSIGN; state=OFFLINE, location=80666a11a09f,37629,1733400130958; forceNewPlan=false, retain=false 2024-12-05T12:02:19,844 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:02:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:02:19,995 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T12:02:19,995 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=f4bc653ea89173afedeec879d98060f1, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:02:19,995 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=446097405ca8c47d8ea55b39855ba137, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:19,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, ASSIGN because future has completed 2024-12-05T12:02:20,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:20,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, ASSIGN because future has completed 2024-12-05T12:02:20,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:02:20,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:02:20,172 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:20,172 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 446097405ca8c47d8ea55b39855ba137, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:02:20,173 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. service=AccessControlService 2024-12-05T12:02:20,173 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:02:20,173 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,173 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:20,173 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,174 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,182 INFO [StoreOpener-446097405ca8c47d8ea55b39855ba137-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,191 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:20,191 INFO [StoreOpener-446097405ca8c47d8ea55b39855ba137-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 446097405ca8c47d8ea55b39855ba137 columnFamilyName cf 2024-12-05T12:02:20,191 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => f4bc653ea89173afedeec879d98060f1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:02:20,191 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. service=AccessControlService 2024-12-05T12:02:20,192 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:02:20,192 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,192 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:20,192 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,192 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,196 INFO [StoreOpener-f4bc653ea89173afedeec879d98060f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,199 DEBUG [StoreOpener-446097405ca8c47d8ea55b39855ba137-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:20,200 INFO [StoreOpener-f4bc653ea89173afedeec879d98060f1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f4bc653ea89173afedeec879d98060f1 columnFamilyName cf 2024-12-05T12:02:20,201 INFO [StoreOpener-446097405ca8c47d8ea55b39855ba137-1 {}] regionserver.HStore(327): Store=446097405ca8c47d8ea55b39855ba137/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:20,202 DEBUG [StoreOpener-f4bc653ea89173afedeec879d98060f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:20,202 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,204 INFO [StoreOpener-f4bc653ea89173afedeec879d98060f1-1 {}] regionserver.HStore(327): Store=f4bc653ea89173afedeec879d98060f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:20,204 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,206 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,206 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,207 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,207 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,208 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,208 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,208 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,208 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,212 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,213 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,226 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:20,228 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened f4bc653ea89173afedeec879d98060f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72424303, 
jitterRate=0.07920621335506439}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:20,228 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:20,230 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for f4bc653ea89173afedeec879d98060f1: Running coprocessor pre-open hook at 1733400140192Writing region info on filesystem at 1733400140193 (+1 ms)Initializing all the Stores at 1733400140195 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400140195Cleaning up temporary data from old regions at 1733400140208 (+13 ms)Running coprocessor post-open hooks at 1733400140228 (+20 ms)Region opened successfully at 1733400140230 (+2 ms) 2024-12-05T12:02:20,232 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1., pid=11, masterSystemTime=1733400140158 2024-12-05T12:02:20,236 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:20,237 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 446097405ca8c47d8ea55b39855ba137; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74364795, jitterRate=0.10812179744243622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:20,238 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:20,238 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 446097405ca8c47d8ea55b39855ba137: Running coprocessor pre-open hook at 1733400140174Writing region info on filesystem at 1733400140174Initializing all the Stores at 1733400140177 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400140177Cleaning up temporary data from old regions at 1733400140208 (+31 ms)Running coprocessor post-open hooks at 1733400140238 (+30 ms)Region opened successfully at 1733400140238 2024-12-05T12:02:20,238 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:20,238 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:20,239 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=f4bc653ea89173afedeec879d98060f1, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:02:20,242 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137., pid=10, masterSystemTime=1733400140155 2024-12-05T12:02:20,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:02:20,248 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:20,249 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 
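The open-region records above report each region coming online with a SteppingSplitPolicy (backed by IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy) and a per-region desiredMaxFileSize with jitter applied. As a rough client-side illustration only, not taken from this test run, a table descriptor with an explicit split policy and max file size can be built and submitted roughly as follows; the table name, split key and sizes below are hypothetical placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithSplitPolicy {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical table; the test table in the log above was pre-split into two regions.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("default", "example-table"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          // Pin a specific split policy instead of relying on the cluster default.
          .setRegionSplitPolicyClassName(
              "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
          // Regions become split candidates once a store passes this size; compare the
          // (jittered) desiredMaxFileSize values printed in the open-region records above.
          .setMaxFileSize(256L * 1024 * 1024)
          .build();
      // One split key yields two initial regions, mirroring the two regions opened above.
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}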
2024-12-05T12:02:20,250 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=446097405ca8c47d8ea55b39855ba137, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:20,256 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:20,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-05T12:02:20,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958 in 248 msec 2024-12-05T12:02:20,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, ASSIGN in 419 msec 2024-12-05T12:02:20,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-05T12:02:20,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012 in 261 msec 2024-12-05T12:02:20,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-05T12:02:20,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, ASSIGN in 426 msec 2024-12-05T12:02:20,273 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:02:20,274 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400140274"}]},"ts":"1733400140274"} 2024-12-05T12:02:20,279 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-05T12:02:20,281 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:02:20,285 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-05T12:02:20,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:20,305 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,307 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34111, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,311 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:20,311 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:20,312 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,313 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57661, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:20,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,318 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40651, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,320 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:20,354 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:20,354 DEBUG [pool-63-thread-1-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:20,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:02:20,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:02:20,362 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-05T12:02:20,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-05T12:02:20,362 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-05T12:02:20,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:02:20,364 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-05T12:02:20,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T12:02:20,364 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-05T12:02:20,365 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:20,365 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T12:02:20,366 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-05T12:02:20,366 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-05T12:02:20,368 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:02:20,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:02:20,368 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-05T12:02:20,368 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T12:02:20,368 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-05T12:02:20,369 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T12:02:20,369 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-05T12:02:20,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:20,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:20,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:20,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:20,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:20,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 646 msec 2024-12-05T12:02:20,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-05T12:02:20,879 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:20,881 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:20,886 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:20,887 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:20,888 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:02:20,891 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:20,905 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:20,913 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,917 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:20,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400140927 (current time:1733400140927). 
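The MasterRpcServices(1763) record just above shows the client requesting a FLUSH-type snapshot (emptySnaptb0-...) of the table, with SnapshotDescriptionUtils filling in the missing creation time. From the client side that request amounts to a single Admin call; the sketch below is illustrative only and uses a hypothetical snapshot and table name rather than the ones in this run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot of an enabled table; the call blocks until the
      // master-side snapshot procedure reports completion.
      admin.snapshot("example-snapshot", TableName.valueOf("example-table"),
          SnapshotType.FLUSH);
    }
  }
}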
2024-12-05T12:02:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:02:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T12:02:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:02:20,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ca244b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:20,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:20,931 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:20,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:20,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:20,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@192e8ffb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:20,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:20,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,933 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:20,934 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c43c047, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:20,936 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:20,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,937 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51104, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:02:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:20,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,947 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
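The connection the master opens and closes above is part of SnapshotDescriptionUtils.isSecurityAvailable, invoked while validating the snapshot because the AccessController coprocessor is active; the underlying table permission it will consult is the jenkins: RWXCA entry written to hbase:acl and pushed out through the /hbase/acl znode when the table was created (see the PermissionStorage and ZKPermissionWatcher records earlier). A grant of that shape is normally issued through AccessControlClient; the sketch below is a minimal illustration, and the user name and table are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  // AccessControlClient.grant declares "throws Throwable", so main does too.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant read/write/exec/create/admin on the whole table (family and
      // qualifier left null), which corresponds to an "RWXCA" ACL entry like
      // the one logged above.
      AccessControlClient.grant(conn, TableName.valueOf("example-table"), "some-user",
          null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}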
2024-12-05T12:02:20,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@479ef1ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:20,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:20,950 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:20,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:20,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:20,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f235526, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:20,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:20,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,952 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:20,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1177e5cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:20,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:20,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:20,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,957 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:02:20,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:20,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:20,960 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:20,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:02:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:20,961 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:02:20,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T12:02:20,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:02:20,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:20,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-05T12:02:20,972 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:02:20,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T12:02:20,977 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:02:20,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:02:20,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741845_1021 (size=215) 2024-12-05T12:02:20,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741845_1021 (size=215) 2024-12-05T12:02:20,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741845_1021 (size=215) 2024-12-05T12:02:21,001 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:02:21,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137}] 2024-12-05T12:02:21,009 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:21,009 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:21,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T12:02:21,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-05T12:02:21,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-05T12:02:21,167 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:21,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:21,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 446097405ca8c47d8ea55b39855ba137: 2024-12-05T12:02:21,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for f4bc653ea89173afedeec879d98060f1: 2024-12-05T12:02:21,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T12:02:21,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
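As the records above show, the master fans the snapshot out into one SnapshotRegionProcedure per region (pid=13 and pid=14 here), each executed as a SnapshotRegionCallable on the region server hosting that region. The same region list is visible to any client through Admin.getRegions; a minimal sketch, with the table name as a placeholder:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListTableRegions {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Each entry corresponds to one region of the table (and, during a
      // snapshot, one region-level subprocedure on the master).
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("example-table"));
      for (RegionInfo ri : regions) {
        System.out.println(ri.getRegionNameAsString() + " encoded=" + ri.getEncodedName());
      }
    }
  }
}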
2024-12-05T12:02:21,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:21,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:21,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:02:21,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:02:21,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741846_1022 (size=86) 2024-12-05T12:02:21,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741846_1022 (size=86) 2024-12-05T12:02:21,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741847_1023 (size=86) 2024-12-05T12:02:21,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741846_1022 (size=86) 2024-12-05T12:02:21,199 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:21,200 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 
2024-12-05T12:02:21,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741847_1023 (size=86) 2024-12-05T12:02:21,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741847_1023 (size=86) 2024-12-05T12:02:21,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-05T12:02:21,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-05T12:02:21,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-05T12:02:21,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-05T12:02:21,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:21,204 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:21,204 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:21,206 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:21,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 in 203 msec 2024-12-05T12:02:21,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-05T12:02:21,213 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:02:21,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 in 204 msec 2024-12-05T12:02:21,216 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:02:21,219 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
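The SNAPSHOT_SNAPSHOT_MOB_REGION state and the MobRegionSnapshotPool record above add a manifest entry for the table's MOB region, which exists because the 'cf' family was declared with IS_MOB => 'true' and MOB_THRESHOLD => '0' in the descriptor echoed in the region open journal earlier. A family with those attributes might be declared roughly as follows; the values mirror that descriptor for illustration only and are not prescriptive.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyDescriptor {
  public static void main(String[] args) {
    // Mirrors the attributes shown in the region open journal above:
    // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW'.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)           // every cell value is treated as a MOB
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();
    System.out.println(cf);
  }
}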
2024-12-05T12:02:21,219 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:02:21,219 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:21,220 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:02:21,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741848_1024 (size=78) 2024-12-05T12:02:21,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741848_1024 (size=78) 2024-12-05T12:02:21,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741848_1024 (size=78) 2024-12-05T12:02:21,237 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:02:21,237 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,240 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741849_1025 (size=713) 2024-12-05T12:02:21,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741849_1025 (size=713) 2024-12-05T12:02:21,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741849_1025 (size=713) 2024-12-05T12:02:21,269 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:02:21,281 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:02:21,282 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,285 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:02:21,285 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-05T12:02:21,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T12:02:21,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 318 msec 2024-12-05T12:02:21,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-05T12:02:21,599 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:21,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:02:21,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:02:21,626 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:21,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:21,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 
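The HRegion(8528) warnings above come from the test loading rows with the WAL switched off, which HBase flags explicitly because such writes are lost if the region server crashes before a flush. On the client side a write skips the WAL when its durability is set to SKIP_WAL, roughly as sketched below; the row, family and value are placeholders, not data from this run.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("example-table"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for write throughput; it is what triggers the
      // "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}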
2024-12-05T12:02:21,632 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:02:21,634 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:21,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:21,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:21,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400141662 (current time:1733400141662). 2024-12-05T12:02:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:02:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T12:02:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:02:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cc86c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:21,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:21,664 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:21,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:21,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:21,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7612bfe9, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:21,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:21,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,667 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:21,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bb07222, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:21,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:21,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:21,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:21,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
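At this point emptySnaptb0-testExportFileSystemStateWithSplitRegion has completed and been moved out of .tmp, and the master is validating the follow-up snaptb0-... request through the same cluster-id/registry handshake as before. Completed snapshots can be enumerated from any client; a minimal sketch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListCompletedSnapshots {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Only completed snapshots are returned; one still being taken does not
      // appear until it has been moved out of .hbase-snapshot/.tmp, as logged above.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName());
      }
    }
  }
}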
2024-12-05T12:02:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,674 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:02:21,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@accccc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:21,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:21,677 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:21,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:21,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:21,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69beda75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:21,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:21,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,680 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:21,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14af875f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:21,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:21,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:21,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:21,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51130, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:21,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:21,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:21,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:21,695 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
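Before validating the snapshot, the master-side client locates hbase:meta and then the hbase:acl region that holds the table's permission entries (the AsyncNonMetaRegionLocator entry above). From ordinary application code the equivalent lookup goes through a RegionLocator; the sketch below is illustrative (class name and printing are mine) and simply lists where each region of the snapshotted table is deployed.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegions {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
      // Asks the cluster for every region of the table, much like the META scans in the log.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}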
2024-12-05T12:02:21,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:21,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:21,696 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:02:21,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T12:02:21,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
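The call stack above is the master copying the table's ACL into the snapshot description: SnapshotDescriptionUtils.writeAclToSnapshotDescription reads hbase:acl via PermissionStorage and finds the entry [jenkins: RWXCA]. A client can read the same entries through AccessControlClient, assuming the AccessController coprocessor is installed as it is in this mini-cluster; the sketch below is a rough illustration (class name is mine, and getUserPermissions is declared to throw Throwable, hence the broad signature).

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Fetches the ACL entries stored in hbase:acl for the table; the log shows the master
      // reading the same entry ("jenkins: RWXCA") while preparing the snapshot description.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemStateWithSplitRegion");
      for (UserPermission p : perms) {
        System.out.println(p);
      }
    }
  }
}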
2024-12-05T12:02:21,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:21,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-05T12:02:21,700 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:02:21,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:21,702 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:02:21,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:02:21,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741850_1026 (size=210) 2024-12-05T12:02:21,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741850_1026 (size=210) 2024-12-05T12:02:21,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741850_1026 (size=210) 2024-12-05T12:02:21,730 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:02:21,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137}] 2024-12-05T12:02:21,734 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:21,734 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:21,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:21,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-05T12:02:21,891 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-05T12:02:21,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:02:21,891 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:02:21,896 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 446097405ca8c47d8ea55b39855ba137 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-05T12:02:21,896 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing f4bc653ea89173afedeec879d98060f1 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-05T12:02:21,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137 is 71, key is 1e0f02459e145d5e1012b4a6498a9fb2/cf:q/1733400141622/Put/seqid=0 2024-12-05T12:02:21,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1 is 71, key is 08902f9b9bee09f8ae77ee8696b234a6/cf:q/1733400141617/Put/seqid=0 2024-12-05T12:02:22,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:22,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741852_1028 (size=8172) 2024-12-05T12:02:22,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741852_1028 (size=8172) 2024-12-05T12:02:22,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741852_1028 (size=8172) 2024-12-05T12:02:22,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:22,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741851_1027 (size=5101) 2024-12-05T12:02:22,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741851_1027 (size=5101) 2024-12-05T12:02:22,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741851_1027 (size=5101) 2024-12-05T12:02:22,032 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:22,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:22,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:22,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/.tmp/cf/250c1b1506e848e4b3cae25e6184e3e1, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=f4bc653ea89173afedeec879d98060f1] 2024-12-05T12:02:22,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/.tmp/cf/79bc4769f851412fb9385fd28cade0d2, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=446097405ca8c47d8ea55b39855ba137] 2024-12-05T12:02:22,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/.tmp/cf/250c1b1506e848e4b3cae25e6184e3e1 is 224, key is 0a6ebc372828652e5b22be74870209f2b/cf:q/1733400141617/Put/seqid=0 2024-12-05T12:02:22,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/.tmp/cf/79bc4769f851412fb9385fd28cade0d2 is 224, key is 177c1c42b4364aaf191dc3a2fce3aa312/cf:q/1733400141622/Put/seqid=0 2024-12-05T12:02:22,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741853_1029 (size=5976) 2024-12-05T12:02:22,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741853_1029 (size=5976) 2024-12-05T12:02:22,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741853_1029 (size=5976) 2024-12-05T12:02:22,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741854_1030 (size=15719) 2024-12-05T12:02:22,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741854_1030 (size=15719) 2024-12-05T12:02:22,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741854_1030 (size=15719) 2024-12-05T12:02:22,169 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/.tmp/cf/79bc4769f851412fb9385fd28cade0d2 2024-12-05T12:02:22,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/.tmp/cf/79bc4769f851412fb9385fd28cade0d2 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2 2024-12-05T12:02:22,194 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2, entries=47, sequenceid=6, filesize=15.4 K 2024-12-05T12:02:22,202 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished 
flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 446097405ca8c47d8ea55b39855ba137 in 306ms, sequenceid=6, compaction requested=false 2024-12-05T12:02:22,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-05T12:02:22,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 446097405ca8c47d8ea55b39855ba137: 2024-12-05T12:02:22,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T12:02:22,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:22,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2] hfiles 2024-12-05T12:02:22,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741855_1031 (size=125) 2024-12-05T12:02:22,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741855_1031 (size=125) 2024-12-05T12:02:22,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741855_1031 (size=125) 2024-12-05T12:02:22,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 
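Because the snapshot is type=FLUSH, each SnapshotRegionProcedure first flushes its region's memstore (the Flushing/DefaultMobStoreFlusher/HStore entries above) so the manifest can reference settled HFiles, and only then records those references. The standalone equivalent of that per-region flush is the Admin flush call sketched below; the class name is illustrative and the configuration is assumed to come from the classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks every region server hosting the table to flush its memstores to HFiles,
      // the same operation each SnapshotRegionProcedure performs before taking references.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
    }
  }
}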
2024-12-05T12:02:22,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-05T12:02:22,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-05T12:02:22,233 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:22,233 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:22,237 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 446097405ca8c47d8ea55b39855ba137 in 503 msec 2024-12-05T12:02:22,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:22,558 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/.tmp/cf/250c1b1506e848e4b3cae25e6184e3e1 2024-12-05T12:02:22,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/.tmp/cf/250c1b1506e848e4b3cae25e6184e3e1 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1 2024-12-05T12:02:22,591 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1, entries=3, sequenceid=6, filesize=5.8 K 2024-12-05T12:02:22,594 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for f4bc653ea89173afedeec879d98060f1 in 702ms, sequenceid=6, compaction requested=false 2024-12-05T12:02:22,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for f4bc653ea89173afedeec879d98060f1: 2024-12-05T12:02:22,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 
for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T12:02:22,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:22,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1] hfiles 2024-12-05T12:02:22,595 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741856_1032 (size=125) 2024-12-05T12:02:22,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741856_1032 (size=125) 2024-12-05T12:02:22,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741856_1032 (size=125) 2024-12-05T12:02:22,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 
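Both region subprocedures have now written their reference files; the MOB region, manifest consolidation, verification, and completion states follow in the entries below, ending with "Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed". Once that point is reached, a client could confirm the snapshot is visible with a plain listing such as this sketch (illustrative class name; listSnapshots() is the stock Admin call). This snapshot is what the export step of the test later copies out of the cluster.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After the master reports the snapshot operation completed, the new snapshot
      // should show up here alongside any earlier ones.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName());
      }
    }
  }
}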
2024-12-05T12:02:22,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-05T12:02:22,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-05T12:02:22,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:22,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:22,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-05T12:02:22,709 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f4bc653ea89173afedeec879d98060f1 in 971 msec 2024-12-05T12:02:22,709 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:02:22,711 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:02:22,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:02:22,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:02:22,715 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:22,721 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1] hfiles 2024-12-05T12:02:22,721 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137 2024-12-05T12:02:22,721 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1 2024-12-05T12:02:22,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741857_1033 (size=309) 2024-12-05T12:02:22,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741857_1033 (size=309) 2024-12-05T12:02:22,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741857_1033 (size=309) 2024-12-05T12:02:22,795 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:02:22,795 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,798 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:22,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39603 is added to blk_1073741858_1034 (size=1023) 2024-12-05T12:02:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741858_1034 (size=1023) 2024-12-05T12:02:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741858_1034 (size=1023) 2024-12-05T12:02:22,985 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:02:23,033 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:02:23,034 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:23,038 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:02:23,038 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-05T12:02:23,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.3410 sec 2024-12-05T12:02:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-05T12:02:23,849 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:23,872 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:02:23,873 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:02:23,874 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:02:23,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:51144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:02:23,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:02:23,876 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T12:02:23,876 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T12:02:23,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36746, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:02:23,876 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-05T12:02:23,879 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:02:23,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:23,882 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:02:23,882 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:23,882 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-05T12:02:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T12:02:23,883 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:02:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741859_1035 (size=390) 2024-12-05T12:02:23,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741859_1035 (size=390) 2024-12-05T12:02:23,895 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741859_1035 (size=390) 2024-12-05T12:02:23,897 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d7a3a0fac78c2c670fa3e1a1d368bf66, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741860_1036 (size=75) 2024-12-05T12:02:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741860_1036 (size=75) 2024-12-05T12:02:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741860_1036 (size=75) 2024-12-05T12:02:23,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T12:02:24,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T12:02:24,315 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:24,315 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing d7a3a0fac78c2c670fa3e1a1d368bf66, disabling compactions & flushes 2024-12-05T12:02:24,315 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:24,315 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:24,315 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. after waiting 0 ms 2024-12-05T12:02:24,315 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 
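Interleaved with the snapshot completion, the test asks the master to create a second table, testExportFileSystemStateWithSplitRegion, with a single 'cf' family using default attributes (VERSIONS => '1', BLOOMFILTER => 'ROW', 64KB blocks, no compression); the master runs this as CreateTableProcedure pid=18. A client-side sketch of that request is below, using the builder API with defaults that should match the descriptor printed in the log; the class name is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTargetTable {
  public static void main(String[] args) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
        // A single 'cf' family with library defaults, which should correspond to the
        // attributes printed in the log (one version, ROW bloom filter, no compression).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc);   // the master executes this as CreateTableProcedure (pid=18 in the log)
    }
  }
}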
2024-12-05T12:02:24,315 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:24,316 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for d7a3a0fac78c2c670fa3e1a1d368bf66: Waiting for close lock at 1733400144315Disabling compacts and flushes for region at 1733400144315Disabling writes for close at 1733400144315Writing region close event to WAL at 1733400144315Closed at 1733400144315 2024-12-05T12:02:24,319 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:02:24,320 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733400144319"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400144319"}]},"ts":"1733400144319"} 2024-12-05T12:02:24,328 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-05T12:02:24,331 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:02:24,331 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400144331"}]},"ts":"1733400144331"} 2024-12-05T12:02:24,336 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-05T12:02:24,336 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:02:24,338 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:02:24,338 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:02:24,338 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:02:24,339 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:02:24,339 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:02:24,339 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:02:24,339 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:02:24,339 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:02:24,339 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:02:24,339 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:02:24,339 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, 
region=d7a3a0fac78c2c670fa3e1a1d368bf66, ASSIGN}] 2024-12-05T12:02:24,343 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, ASSIGN 2024-12-05T12:02:24,345 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:02:24,496 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T12:02:24,497 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d7a3a0fac78c2c670fa3e1a1d368bf66, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:24,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, ASSIGN because future has completed 2024-12-05T12:02:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T12:02:24,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:24,683 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:24,683 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => d7a3a0fac78c2c670fa3e1a1d368bf66, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:02:24,684 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. service=AccessControlService 2024-12-05T12:02:24,684 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:02:24,685 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,685 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:24,685 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,685 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,694 INFO [StoreOpener-d7a3a0fac78c2c670fa3e1a1d368bf66-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,700 INFO [StoreOpener-d7a3a0fac78c2c670fa3e1a1d368bf66-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7a3a0fac78c2c670fa3e1a1d368bf66 columnFamilyName cf 2024-12-05T12:02:24,700 DEBUG [StoreOpener-d7a3a0fac78c2c670fa3e1a1d368bf66-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:24,704 INFO [StoreOpener-d7a3a0fac78c2c670fa3e1a1d368bf66-1 {}] regionserver.HStore(327): Store=d7a3a0fac78c2c670fa3e1a1d368bf66/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:24,705 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,707 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,708 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,709 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,709 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,713 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,728 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:02:24,729 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened d7a3a0fac78c2c670fa3e1a1d368bf66; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75194844, jitterRate=0.12049049139022827}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:24,729 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:24,731 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for d7a3a0fac78c2c670fa3e1a1d368bf66: Running coprocessor pre-open hook at 1733400144685Writing region info on filesystem at 1733400144685Initializing all the Stores at 1733400144687 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400144688 (+1 ms)Cleaning up temporary data from old regions at 1733400144709 (+21 ms)Running coprocessor post-open hooks at 1733400144729 (+20 ms)Region opened successfully at 1733400144730 (+1 ms) 2024-12-05T12:02:24,732 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., pid=20, masterSystemTime=1733400144670 2024-12-05T12:02:24,736 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:24,736 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 
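For reference, the region open journal above prints the schema of the 'cf' column family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB, no compression or encoding). A table like the one being created here could be declared on the client side roughly as in the sketch below; only the table and family names and the listed attributes come from the log, while the class name and connection setup are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitRegionTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
          // Column family 'cf' mirroring the attributes printed in the region open journal:
          // one version, ROW bloom filter, 64 KB blocks, no compression/encoding.
          ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024);
          // createTable blocks until the master's CreateTableProcedure (pid=18 above) finishes.
          admin.createTable(TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(cf.build())
              .build());
        }
      }
    }
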
2024-12-05T12:02:24,738 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d7a3a0fac78c2c670fa3e1a1d368bf66, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:24,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:24,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-05T12:02:24,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012 in 231 msec 2024-12-05T12:02:24,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-05T12:02:24,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, ASSIGN in 413 msec 2024-12-05T12:02:24,761 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:02:24,761 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400144761"}]},"ts":"1733400144761"} 2024-12-05T12:02:24,767 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-05T12:02:24,771 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:02:24,771 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-05T12:02:24,781 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T12:02:24,829 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:24,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:24,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:24,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, 
quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:02:24,839 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,844 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:02:24,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 965 msec 2024-12-05T12:02:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-05T12:02:25,029 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:25,030 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:02:25,034 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-05T12:02:25,086 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-05T12:02:25,677 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:02:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741861_1037 (size=134217728) 2024-12-05T12:02:28,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741861_1037 (size=134217728) 2024-12-05T12:02:28,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741861_1037 (size=134217728) 2024-12-05T12:02:29,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741862_1038 (size=134217728) 2024-12-05T12:02:29,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741862_1038 (size=134217728) 2024-12-05T12:02:29,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741862_1038 (size=134217728) 2024-12-05T12:02:30,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:30,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T12:02:30,373 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733400145041/Put/seqid=0 2024-12-05T12:02:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741863_1039 (size=51979256) 2024-12-05T12:02:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741863_1039 (size=51979256) 2024-12-05T12:02:30,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741863_1039 (size=51979256) 2024-12-05T12:02:30,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3361862a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:30,383 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:30,383 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:30,385 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:30,386 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:30,386 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:30,386 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@133d8e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:30,386 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:30,387 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:30,387 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:30,389 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:30,390 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3becd56f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:30,391 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:30,392 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:30,393 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:30,395 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52620, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:30,406 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:37541/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
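The bulk load warned about above is driven by org.apache.hadoop.hbase.tool.BulkLoadHFilesTool (the same class appears in the test's call stack further down). A minimal programmatic sketch follows, assuming a staging directory laid out as <dir>/<family>/<hfile>, matching the .../output/cf/test_file path in the log; the output path and class name below are placeholders, not taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;

    public class BulkLoadExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
        // Expected layout: <dir>/cf/<hfile>; the path here is a placeholder, the test
        // actually loads an HDFS path under its own test-data directory.
        loader.bulkLoad(TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
            new Path("/path/to/output"));
      }
    }
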
2024-12-05T12:02:30,407 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-05T12:02:30,408 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 80666a11a09f,34947,1733400129965 2024-12-05T12:02:30,408 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7857e3ee 2024-12-05T12:02:30,408 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-05T12:02:30,410 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-05T12:02:30,418 WARN [IPC Server handler 3 on default port 37541 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-05T12:02:30,425 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:30,428 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:30,431 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:30,437 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:30,463 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37541/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-05T12:02:30,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-05T12:02:30,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:57661 deadline: 1733400210492, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-05T12:02:30,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:02:30,501 WARN [IPC Server handler 3 on default port 37541 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-05T12:02:30,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37541/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file for inclusion in d7a3a0fac78c2c670fa3e1a1d368bf66/cf 2024-12-05T12:02:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-05T12:02:30,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-05T12:02:30,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:37541/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-05T12:02:30,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(2603): Flush status journal for d7a3a0fac78c2c670fa3e1a1d368bf66: 2024-12-05T12:02:30,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37541/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/output/cf/test_file to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/staging/jenkins__testExportFileSystemStateWithSplitRegion__i5k2p9ueqol8nqljerqp25b9v0eff4vp4ste1k3brmv1lqhuvbaa4ff381alinaj/cf/test_file 2024-12-05T12:02:30,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/staging/jenkins__testExportFileSystemStateWithSplitRegion__i5k2p9ueqol8nqljerqp25b9v0eff4vp4ste1k3brmv1lqhuvbaa4ff381alinaj/cf/test_file as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ 2024-12-05T12:02:30,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/staging/jenkins__testExportFileSystemStateWithSplitRegion__i5k2p9ueqol8nqljerqp25b9v0eff4vp4ste1k3brmv1lqhuvbaa4ff381alinaj/cf/test_file into d7a3a0fac78c2c670fa3e1a1d368bf66/cf as 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ - updating store file list. 2024-12-05T12:02:30,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 540ad97146874d80af9cb7c5229f6232_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T12:02:30,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ into d7a3a0fac78c2c670fa3e1a1d368bf66/cf 2024-12-05T12:02:30,551 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/staging/jenkins__testExportFileSystemStateWithSplitRegion__i5k2p9ueqol8nqljerqp25b9v0eff4vp4ste1k3brmv1lqhuvbaa4ff381alinaj/cf/test_file into d7a3a0fac78c2c670fa3e1a1d368bf66/cf (new location: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_) 2024-12-05T12:02:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/staging/jenkins__testExportFileSystemStateWithSplitRegion__i5k2p9ueqol8nqljerqp25b9v0eff4vp4ste1k3brmv1lqhuvbaa4ff381alinaj/cf/test_file 2024-12-05T12:02:30,561 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T12:02:30,561 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:02:30,561 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:30,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:30,562 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:30,563 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
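Once the HFile has been committed into d7a3a0fac78c2c670fa3e1a1d368bf66/cf, the data is readable through the normal client path; the next entries show the client locating row '5' in the table before requesting a split. A hedged read-back sketch is below: the row key bytes are only inferred from the HFile bounds first=1\x00\x00\x00 last=9\x00\x00\x00 and the "cf:q" key printed by HFileWriterImpl, and may not match the test's actual keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyBulkLoad {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
          // Row keys in the loaded HFile appear to be "<digit>\x00\x00\x00" (assumption based
          // on the logged bounds); qualifier 'q' comes from the "cf:q" key in the log.
          byte[] row = Bytes.add(Bytes.toBytes("5"), new byte[3]);
          Result r = table.get(new Get(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")));
          System.out.println("value = " + Bytes.toStringBinary(
              r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
        }
      }
    }
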
2024-12-05T12:02:30,563 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=80666a11a09f:42305 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-05T12:02:30,564 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-05T12:02:30,564 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2 from cache 2024-12-05T12:02:30,564 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-05T12:02:30,568 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:30,577 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 
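The split request logged by HMaster above corresponds to an Admin#split call on the client side, which the master then executes as the SplitTableRegionProcedure seen in the following entries (unassign the parent, split the store file, open the two daughters). A minimal sketch, with a split point near row '5' chosen purely for illustration; the test's actual split key is not shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRegionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
          // Ask the master to split the (single) region of the table at an explicit point;
          // omitting the split point would let HBase split at the region's midkey instead.
          admin.split(table, Bytes.toBytes("5"));
          // admin.split(table);  // alternative: midkey-based split
        }
      }
    }
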
2024-12-05T12:02:30,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=80666a11a09f,42305,1733400131012 2024-12-05T12:02:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d7a3a0fac78c2c670fa3e1a1d368bf66, daughterA=b528516e028fdfdc1aaa2ac017ae9a25, daughterB=854c42f125a98370341f93094bde91b0 2024-12-05T12:02:30,591 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d7a3a0fac78c2c670fa3e1a1d368bf66, daughterA=b528516e028fdfdc1aaa2ac017ae9a25, daughterB=854c42f125a98370341f93094bde91b0 2024-12-05T12:02:30,591 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d7a3a0fac78c2c670fa3e1a1d368bf66, daughterA=b528516e028fdfdc1aaa2ac017ae9a25, daughterB=854c42f125a98370341f93094bde91b0 2024-12-05T12:02:30,592 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d7a3a0fac78c2c670fa3e1a1d368bf66, daughterA=b528516e028fdfdc1aaa2ac017ae9a25, daughterB=854c42f125a98370341f93094bde91b0 2024-12-05T12:02:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T12:02:30,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, UNASSIGN}] 2024-12-05T12:02:30,600 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, UNASSIGN 2024-12-05T12:02:30,603 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d7a3a0fac78c2c670fa3e1a1d368bf66, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:30,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, UNASSIGN because future has completed 2024-12-05T12:02:30,607 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T12:02:30,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:30,669 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=80666a11a09f:37629 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?]
    at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?]
    at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?]
    at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 34 more
2024-12-05T12:02:30,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21
2024-12-05T12:02:30,767 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close d7a3a0fac78c2c670fa3e1a1d368bf66
2024-12-05T12:02:30,767 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-12-05T12:02:30,768 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing d7a3a0fac78c2c670fa3e1a1d368bf66, disabling compactions & flushes
2024-12-05T12:02:30,768 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.
2024-12-05T12:02:30,768 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.
2024-12-05T12:02:30,768 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. after waiting 0 ms
2024-12-05T12:02:30,768 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.
2024-12-05T12:02:30,775 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-05T12:02:30,778 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:02:30,779 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66. 2024-12-05T12:02:30,779 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for d7a3a0fac78c2c670fa3e1a1d368bf66: Waiting for close lock at 1733400150768Running coprocessor pre-close hooks at 1733400150768Disabling compacts and flushes for region at 1733400150768Disabling writes for close at 1733400150768Writing region close event to WAL at 1733400150770 (+2 ms)Running coprocessor post-close hooks at 1733400150776 (+6 ms)Closed at 1733400150779 (+3 ms) 2024-12-05T12:02:30,782 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:30,783 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d7a3a0fac78c2c670fa3e1a1d368bf66, regionState=CLOSED 2024-12-05T12:02:30,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:30,789 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-05T12:02:30,790 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure d7a3a0fac78c2c670fa3e1a1d368bf66, server=80666a11a09f,42305,1733400131012 in 179 msec 2024-12-05T12:02:30,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-05T12:02:30,792 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d7a3a0fac78c2c670fa3e1a1d368bf66, UNASSIGN in 191 msec 2024-12-05T12:02:30,803 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:30,806 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=d7a3a0fac78c2c670fa3e1a1d368bf66, threads=1 2024-12-05T12:02:30,808 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ for region: d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:30,815 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 540ad97146874d80af9cb7c5229f6232_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T12:02:30,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741864_1040 (size=21) 2024-12-05T12:02:30,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741864_1040 (size=21) 2024-12-05T12:02:30,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741864_1040 (size=21) 2024-12-05T12:02:30,852 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 540ad97146874d80af9cb7c5229f6232_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-05T12:02:30,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741865_1041 (size=21) 2024-12-05T12:02:30,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741865_1041 (size=21) 2024-12-05T12:02:30,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741865_1041 (size=21) 2024-12-05T12:02:30,899 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ for region: d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:02:30,902 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region d7a3a0fac78c2c670fa3e1a1d368bf66 Daughter A: [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66] storefiles, Daughter B: [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66] storefiles. 
2024-12-05T12:02:30,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T12:02:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741866_1042 (size=76) 2024-12-05T12:02:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741866_1042 (size=76) 2024-12-05T12:02:30,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741866_1042 (size=76) 2024-12-05T12:02:30,941 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741867_1043 (size=76) 2024-12-05T12:02:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741867_1043 (size=76) 2024-12-05T12:02:30,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741867_1043 (size=76) 2024-12-05T12:02:30,978 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:30,988 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-05T12:02:30,991 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-05T12:02:30,994 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733400150993"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733400150993"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733400150993"}]},"ts":"1733400150993"} 2024-12-05T12:02:30,994 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733400150993"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400150993"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733400150993"}]},"ts":"1733400150993"} 2024-12-05T12:02:30,994 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733400150993"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400150993"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733400150993"}]},"ts":"1733400150993"} 2024-12-05T12:02:31,007 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, ASSIGN}] 2024-12-05T12:02:31,009 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, ASSIGN 2024-12-05T12:02:31,009 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, ASSIGN 2024-12-05T12:02:31,009 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, ASSIGN; state=SPLITTING_NEW, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:02:31,009 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, ASSIGN; state=SPLITTING_NEW, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:02:31,160 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T12:02:31,160 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=b528516e028fdfdc1aaa2ac017ae9a25, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:31,160 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=854c42f125a98370341f93094bde91b0, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:31,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, ASSIGN because future has completed 2024-12-05T12:02:31,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:31,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, ASSIGN because future has completed 2024-12-05T12:02:31,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 854c42f125a98370341f93094bde91b0, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:02:31,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T12:02:31,322 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:02:31,322 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => b528516e028fdfdc1aaa2ac017ae9a25, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.', STARTKEY => '', ENDKEY => '5'} 2024-12-05T12:02:31,323 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. service=AccessControlService 2024-12-05T12:02:31,323 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:02:31,324 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,324 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:31,324 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,324 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,327 INFO [StoreOpener-b528516e028fdfdc1aaa2ac017ae9a25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,329 INFO [StoreOpener-b528516e028fdfdc1aaa2ac017ae9a25-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b528516e028fdfdc1aaa2ac017ae9a25 columnFamilyName cf 2024-12-05T12:02:31,329 DEBUG [StoreOpener-b528516e028fdfdc1aaa2ac017ae9a25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:31,341 DEBUG [StoreFileOpener-b528516e028fdfdc1aaa2ac017ae9a25-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66: NONE, but ROW specified in column family configuration 2024-12-05T12:02:31,356 DEBUG [StoreOpener-b528516e028fdfdc1aaa2ac017ae9a25-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_-bottom 2024-12-05T12:02:31,356 INFO [StoreOpener-b528516e028fdfdc1aaa2ac017ae9a25-1 {}] regionserver.HStore(327): Store=b528516e028fdfdc1aaa2ac017ae9a25/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:31,356 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,357 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,359 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,359 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,359 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,361 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,362 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened b528516e028fdfdc1aaa2ac017ae9a25; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63476521, jitterRate=-0.05412612855434418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:31,362 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,363 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for b528516e028fdfdc1aaa2ac017ae9a25: Running coprocessor pre-open hook at 1733400151324Writing region info on filesystem at 1733400151324Initializing all the Stores at 1733400151326 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400151326Cleaning up temporary data from old regions at 1733400151360 (+34 ms)Running coprocessor post-open hooks at 1733400151363 (+3 ms)Region opened successfully at 1733400151363 2024-12-05T12:02:31,364 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25., pid=26, masterSystemTime=1733400151316 2024-12-05T12:02:31,365 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): 
Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.,because compaction is disabled. 2024-12-05T12:02:31,367 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:02:31,367 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:02:31,367 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:02:31,368 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 854c42f125a98370341f93094bde91b0, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.', STARTKEY => '5', ENDKEY => ''} 2024-12-05T12:02:31,368 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=b528516e028fdfdc1aaa2ac017ae9a25, regionState=OPEN, openSeqNum=7, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:31,368 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. service=AccessControlService 2024-12-05T12:02:31,368 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:02:31,368 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,368 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:02:31,368 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,369 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,371 INFO [StoreOpener-854c42f125a98370341f93094bde91b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:31,372 INFO [StoreOpener-854c42f125a98370341f93094bde91b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 854c42f125a98370341f93094bde91b0 columnFamilyName cf 2024-12-05T12:02:31,372 DEBUG [StoreOpener-854c42f125a98370341f93094bde91b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:31,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-05T12:02:31,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012 in 210 msec 2024-12-05T12:02:31,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, ASSIGN in 371 msec 2024-12-05T12:02:31,386 DEBUG [StoreFileOpener-854c42f125a98370341f93094bde91b0-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66: NONE, but ROW specified in column family configuration 2024-12-05T12:02:31,388 DEBUG [StoreOpener-854c42f125a98370341f93094bde91b0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_-top 2024-12-05T12:02:31,389 INFO [StoreOpener-854c42f125a98370341f93094bde91b0-1 {}] regionserver.HStore(327): Store=854c42f125a98370341f93094bde91b0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:02:31,389 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,390 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,392 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,392 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,393 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,394 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,396 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 854c42f125a98370341f93094bde91b0; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73793092, jitterRate=0.09960275888442993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:02:31,396 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,396 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 854c42f125a98370341f93094bde91b0: Running coprocessor pre-open hook at 1733400151369Writing region info on filesystem at 1733400151369Initializing all the Stores at 1733400151370 (+1 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400151370Cleaning up temporary data from old regions at 1733400151393 (+23 ms)Running coprocessor post-open hooks at 1733400151396 (+3 ms)Region opened successfully at 1733400151396 2024-12-05T12:02:31,397 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0., pid=27, masterSystemTime=1733400151316 2024-12-05T12:02:31,397 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.,because compaction is disabled. 2024-12-05T12:02:31,400 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:02:31,400 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:02:31,401 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=80666a11a09f,42305,1733400131012, table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-05T12:02:31,401 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=854c42f125a98370341f93094bde91b0, regionState=OPEN, openSeqNum=7, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:02:31,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 854c42f125a98370341f93094bde91b0, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:02:31,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-05T12:02:31,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 854c42f125a98370341f93094bde91b0, server=80666a11a09f,42305,1733400131012 in 241 msec 2024-12-05T12:02:31,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-05T12:02:31,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, ASSIGN in 401 msec 2024-12-05T12:02:31,413 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d7a3a0fac78c2c670fa3e1a1d368bf66, daughterA=b528516e028fdfdc1aaa2ac017ae9a25, daughterB=854c42f125a98370341f93094bde91b0 in 826 msec 2024-12-05T12:02:31,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-05T12:02:31,728 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-05T12:02:31,728 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:31,732 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400151732 (current time:1733400151732). 
2024-12-05T12:02:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:02:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-05T12:02:31,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:02:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c0c329c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:31,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:31,734 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:31,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:31,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:31,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77449b6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:31,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:31,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,736 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45500, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:31,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3573fb98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:31,738 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:31,738 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:31,739 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:31,741 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:02:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,741 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:02:31,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473f91d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:02:31,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:02:31,743 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:02:31,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:02:31,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:02:31,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11fbb091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:02:31,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:02:31,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,745 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:02:31,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c78e0a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:02:31,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:02:31,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:02:31,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:31,749 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:02:31,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:02:31,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:02:31,752 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:02:31,755 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:02:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:02:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:02:31,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-05T12:02:31,756 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:02:31,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:02:31,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-05T12:02:31,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-05T12:02:31,759 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:02:31,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T12:02:31,761 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:02:31,764 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:02:31,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741868_1044 (size=197) 2024-12-05T12:02:31,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741868_1044 (size=197) 2024-12-05T12:02:31,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741868_1044 (size=197) 2024-12-05T12:02:31,774 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:02:31,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 854c42f125a98370341f93094bde91b0}] 2024-12-05T12:02:31,775 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,775 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T12:02:31,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-05T12:02:31,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 854c42f125a98370341f93094bde91b0: 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for b528516e028fdfdc1aaa2ac017ae9a25: 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-05T12:02:31,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_-top] hfiles 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_-bottom] hfiles 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741869_1045 
(size=182) 2024-12-05T12:02:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741870_1046 (size=182) 2024-12-05T12:02:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741870_1046 (size=182) 2024-12-05T12:02:31,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741869_1045 (size=182) 2024-12-05T12:02:31,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:02:31,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741870_1046 (size=182) 2024-12-05T12:02:31,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-05T12:02:31,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741869_1045 (size=182) 2024-12-05T12:02:31,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:02:31,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-05T12:02:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-05T12:02:31,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,947 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:02:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-05T12:02:31,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,948 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 854c42f125a98370341f93094bde91b0 2024-12-05T12:02:31,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25 in 174 msec 2024-12-05T12:02:31,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-05T12:02:31,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; 
SnapshotRegionProcedure 854c42f125a98370341f93094bde91b0 in 175 msec 2024-12-05T12:02:31,951 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:02:31,953 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-05T12:02:31,953 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:02:31,953 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:02:31,954 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_] hfiles 2024-12-05T12:02:31,954 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ 2024-12-05T12:02:31,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741871_1047 (size=129) 2024-12-05T12:02:31,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741871_1047 (size=129) 2024-12-05T12:02:31,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741871_1047 (size=129) 2024-12-05T12:02:31,964 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => d7a3a0fac78c2c670fa3e1a1d368bf66, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,965 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:02:31,966 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:02:31,966 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,967 DEBUG [PEWorker-1 {}] 
snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741872_1048 (size=891) 2024-12-05T12:02:31,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741872_1048 (size=891) 2024-12-05T12:02:31,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741872_1048 (size=891) 2024-12-05T12:02:31,986 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:02:31,993 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:02:31,993 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:31,995 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:02:31,996 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-05T12:02:31,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 240 msec 2024-12-05T12:02:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-05T12:02:32,078 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:02:32,078 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078 2024-12-05T12:02:32,079 INFO 
[Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:32,111 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:02:32,111 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:32,115 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:02:32,121 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:02:32,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741873_1049 (size=197) 2024-12-05T12:02:32,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741874_1050 (size=891) 2024-12-05T12:02:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741873_1049 (size=197) 2024-12-05T12:02:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741873_1049 (size=197) 2024-12-05T12:02:32,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741874_1050 (size=891) 2024-12-05T12:02:32,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741874_1050 (size=891) 2024-12-05T12:02:32,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:32,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:32,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-15002398163507771891.jar 2024-12-05T12:02:33,054 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-14733983009795194842.jar 2024-12-05T12:02:33,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:02:33,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:02:33,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:02:33,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:02:33,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:02:33,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:02:33,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:02:33,131 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:02:33,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:02:33,132 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:02:33,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:02:33,133 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:02:33,135 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:02:33,136 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-05T12:02:33,136 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:02:33,136 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:02:33,136 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:02:33,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:02:33,137 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:02:33,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741875_1051 (size=131440) 2024-12-05T12:02:33,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741875_1051 (size=131440) 2024-12-05T12:02:33,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741875_1051 (size=131440) 2024-12-05T12:02:33,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741876_1052 (size=4188619) 2024-12-05T12:02:33,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741876_1052 (size=4188619) 2024-12-05T12:02:33,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741876_1052 (size=4188619) 2024-12-05T12:02:33,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741877_1053 (size=1323991) 2024-12-05T12:02:33,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741877_1053 (size=1323991) 2024-12-05T12:02:33,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741877_1053 (size=1323991) 2024-12-05T12:02:33,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741878_1054 (size=903934) 2024-12-05T12:02:33,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741878_1054 (size=903934) 
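[Editor's sketch] The mapreduce.TableMapReduceUtil(972) lines above show the export job resolving, for each required class, the jar that provides it and shipping those jars with the MapReduce job. A short sketch of how a job picks up the same dependencies via the TableMapReduceUtil helper; the job name here is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddHBaseDependencyJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-deps-demo"); // illustrative name
    // Adds the HBase, ZooKeeper, protobuf, netty, metrics, etc. jars to the job's
    // distributed cache; this is what produces the "For class X, using jar Y" lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}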
2024-12-05T12:02:33,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741878_1054 (size=903934) 2024-12-05T12:02:33,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741879_1055 (size=8360360) 2024-12-05T12:02:33,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741879_1055 (size=8360360) 2024-12-05T12:02:33,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741879_1055 (size=8360360) 2024-12-05T12:02:33,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741880_1056 (size=1877034) 2024-12-05T12:02:33,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741880_1056 (size=1877034) 2024-12-05T12:02:33,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741880_1056 (size=1877034) 2024-12-05T12:02:33,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741881_1057 (size=77835) 2024-12-05T12:02:33,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741881_1057 (size=77835) 2024-12-05T12:02:33,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741881_1057 (size=77835) 2024-12-05T12:02:33,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741882_1058 (size=6425023) 2024-12-05T12:02:33,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741882_1058 (size=6425023) 2024-12-05T12:02:33,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741882_1058 (size=6425023) 2024-12-05T12:02:33,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741883_1059 (size=30949) 2024-12-05T12:02:33,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741883_1059 (size=30949) 2024-12-05T12:02:33,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741883_1059 (size=30949) 2024-12-05T12:02:33,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741884_1060 (size=443172) 2024-12-05T12:02:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741884_1060 (size=443172) 2024-12-05T12:02:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741884_1060 (size=443172) 2024-12-05T12:02:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741885_1061 
(size=1597213) 2024-12-05T12:02:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741885_1061 (size=1597213) 2024-12-05T12:02:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741885_1061 (size=1597213) 2024-12-05T12:02:33,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741886_1062 (size=4695811) 2024-12-05T12:02:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741886_1062 (size=4695811) 2024-12-05T12:02:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741886_1062 (size=4695811) 2024-12-05T12:02:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741887_1063 (size=232957) 2024-12-05T12:02:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741887_1063 (size=232957) 2024-12-05T12:02:33,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741887_1063 (size=232957) 2024-12-05T12:02:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741888_1064 (size=127628) 2024-12-05T12:02:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741888_1064 (size=127628) 2024-12-05T12:02:33,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741888_1064 (size=127628) 2024-12-05T12:02:33,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741889_1065 (size=20406) 2024-12-05T12:02:33,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741889_1065 (size=20406) 2024-12-05T12:02:33,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741889_1065 (size=20406) 2024-12-05T12:02:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741890_1066 (size=5175431) 2024-12-05T12:02:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741890_1066 (size=5175431) 2024-12-05T12:02:33,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741890_1066 (size=5175431) 2024-12-05T12:02:33,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741891_1067 (size=217634) 2024-12-05T12:02:33,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741891_1067 (size=217634) 2024-12-05T12:02:33,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to 
blk_1073741891_1067 (size=217634) 2024-12-05T12:02:33,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741892_1068 (size=1832290) 2024-12-05T12:02:33,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741892_1068 (size=1832290) 2024-12-05T12:02:33,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741892_1068 (size=1832290) 2024-12-05T12:02:33,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741893_1069 (size=322274) 2024-12-05T12:02:33,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741893_1069 (size=322274) 2024-12-05T12:02:33,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741893_1069 (size=322274) 2024-12-05T12:02:33,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741894_1070 (size=503880) 2024-12-05T12:02:33,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741894_1070 (size=503880) 2024-12-05T12:02:33,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741894_1070 (size=503880) 2024-12-05T12:02:33,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741895_1071 (size=29229) 2024-12-05T12:02:33,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741895_1071 (size=29229) 2024-12-05T12:02:33,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741895_1071 (size=29229) 2024-12-05T12:02:34,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741896_1072 (size=24096) 2024-12-05T12:02:34,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741896_1072 (size=24096) 2024-12-05T12:02:34,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741896_1072 (size=24096) 2024-12-05T12:02:34,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741897_1073 (size=111872) 2024-12-05T12:02:34,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741897_1073 (size=111872) 2024-12-05T12:02:34,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741897_1073 (size=111872) 2024-12-05T12:02:34,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741898_1074 (size=45609) 2024-12-05T12:02:34,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added 
to blk_1073741898_1074 (size=45609) 2024-12-05T12:02:34,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741898_1074 (size=45609) 2024-12-05T12:02:34,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741899_1075 (size=136454) 2024-12-05T12:02:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741899_1075 (size=136454) 2024-12-05T12:02:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741899_1075 (size=136454) 2024-12-05T12:02:34,054 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T12:02:34,058 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-05T12:02:34,064 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d7a3a0fac78c2c670fa3e1a1d368bf66-540ad97146874d80af9cb7c5229f6232_SeqId_4_. 2024-12-05T12:02:34,064 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d7a3a0fac78c2c670fa3e1a1d368bf66-540ad97146874d80af9cb7c5229f6232_SeqId_4_. 2024-12-05T12:02:34,064 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-05T12:02:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741900_1076 (size=244) 2024-12-05T12:02:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741900_1076 (size=244) 2024-12-05T12:02:34,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741900_1076 (size=244) 2024-12-05T12:02:34,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741901_1077 (size=17) 2024-12-05T12:02:34,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741901_1077 (size=17) 2024-12-05T12:02:34,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741901_1077 (size=17) 2024-12-05T12:02:34,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741902_1078 (size=304054) 2024-12-05T12:02:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741902_1078 (size=304054) 2024-12-05T12:02:34,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741902_1078 (size=304054) 2024-12-05T12:02:34,608 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:02:34,608 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:02:34,782 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0001_000001 (auth:SIMPLE) from 127.0.0.1:32856 2024-12-05T12:02:36,311 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:02:37,090 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-05T12:02:38,980 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:02:40,913 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0001_000001 (auth:SIMPLE) from 127.0.0.1:49038 2024-12-05T12:02:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741903_1079 (size=349752) 2024-12-05T12:02:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741903_1079 (size=349752) 2024-12-05T12:02:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741903_1079 (size=349752) 2024-12-05T12:02:43,158 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0001_000001 (auth:SIMPLE) from 127.0.0.1:59022 2024-12-05T12:02:55,035 INFO [master/80666a11a09f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-05T12:02:55,035 INFO [master/80666a11a09f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-05T12:03:05,173 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 446097405ca8c47d8ea55b39855ba137, had cached 0 bytes from a total of 15719 2024-12-05T12:03:05,192 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f4bc653ea89173afedeec879d98060f1, had cached 0 bytes from a total of 5976 2024-12-05T12:03:08,980 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
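[Editor's sketch] The AbstractLeafQueue warnings above mean the MiniMRCluster's capacity-scheduler queue has an ApplicationMaster resource share too small for even one AM, so YARN skips enforcement to let the export job start. On a configurable cluster the corresponding knob is the capacity scheduler's maximum-am-resource-percent setting; a hedged sketch of setting it programmatically, where the key follows the stock YARN capacity scheduler naming and the value is illustrative:

import org.apache.hadoop.conf.Configuration;

public class RaiseAmResourcePercent {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Cluster-wide default; per-queue overrides use
    // yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f); // illustrative value
    System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}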
2024-12-05T12:03:13,864 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f4bc653ea89173afedeec879d98060f1 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:03:13,865 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 02119c1ced80736cbd0ab02ca19ee30c changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:03:13,865 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 446097405ca8c47d8ea55b39855ba137 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:03:16,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b528516e028fdfdc1aaa2ac017ae9a25, had cached 0 bytes from a total of 320414712 2024-12-05T12:03:16,368 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 854c42f125a98370341f93094bde91b0, had cached 0 bytes from a total of 320414712 2024-12-05T12:03:20,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741904_1080 (size=134217728) 2024-12-05T12:03:20,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741904_1080 (size=134217728) 2024-12-05T12:03:20,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741904_1080 (size=134217728) 2024-12-05T12:03:38,981 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T12:03:50,174 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 446097405ca8c47d8ea55b39855ba137, had cached 0 bytes from a total of 15719 2024-12-05T12:03:50,192 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f4bc653ea89173afedeec879d98060f1, had cached 0 bytes from a total of 5976 2024-12-05T12:03:51,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741905_1081 (size=134217728) 2024-12-05T12:03:51,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741905_1081 (size=134217728) 2024-12-05T12:03:51,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741905_1081 (size=134217728) 2024-12-05T12:04:01,324 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b528516e028fdfdc1aaa2ac017ae9a25, had cached 0 bytes from a total of 320414712 2024-12-05T12:04:01,369 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 854c42f125a98370341f93094bde91b0, had cached 0 bytes from a total of 320414712 2024-12-05T12:04:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741906_1082 (size=51979256) 2024-12-05T12:04:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741906_1082 (size=51979256) 2024-12-05T12:04:04,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741906_1082 (size=51979256) 2024-12-05T12:04:04,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741907_1083 (size=17520) 2024-12-05T12:04:04,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741907_1083 (size=17520) 2024-12-05T12:04:04,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741907_1083 (size=17520) 2024-12-05T12:04:04,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741908_1084 (size=482) 2024-12-05T12:04:04,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741908_1084 (size=482) 2024-12-05T12:04:04,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741908_1084 (size=482) 2024-12-05T12:04:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741909_1085 (size=17520) 2024-12-05T12:04:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741909_1085 (size=17520) 2024-12-05T12:04:04,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741909_1085 (size=17520) 2024-12-05T12:04:04,121 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000002/launch_container.sh] 2024-12-05T12:04:04,121 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000002/container_tokens] 2024-12-05T12:04:04,121 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000002/sysfs] 2024-12-05T12:04:04,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741910_1086 (size=349752) 2024-12-05T12:04:04,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741910_1086 (size=349752) 2024-12-05T12:04:04,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741910_1086 (size=349752) 2024-12-05T12:04:04,150 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0001_000001 (auth:SIMPLE) from 127.0.0.1:33186 2024-12-05T12:04:06,027 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:04:06,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
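[Editor's sketch] The "Finalize the Snapshot Export" and "Verify the exported snapshot's expiration status and integrity" entries above come from the ExportSnapshot tool the test drives. Outside the test harness, roughly the same export could be launched as a Hadoop Tool; a hedged sketch, assuming the -snapshot and -copy-to options of org.apache.hadoop.hbase.snapshot.ExportSnapshot and an illustrative destination URI (the test writes under .../export-test/export-<timestamp>):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://namenode:8020/export-test/export-demo" // illustrative destination
    });
    System.exit(rc);
  }
}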
2024-12-05T12:04:06,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,036 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:04:06,036 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:04:06,036 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,037 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-05T12:04:06,037 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-05T12:04:06,037 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,037 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-05T12:04:06,037 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400152078/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-05T12:04:06,051 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,058 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400246058"}]},"ts":"1733400246058"} 2024-12-05T12:04:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T12:04:06,060 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
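[Editor's sketch] The TestExportSnapshot(495/500) entries above verify the export by listing the snapshot directory on both the source and destination filesystems and confirming that .snapshotinfo and data.manifest are present. A minimal sketch of that kind of check with the plain Hadoop FileSystem API, assuming an illustrative NameNode URI and export path:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf); // illustrative URI
    Path snapshotDir = new Path(
        "/export-test/export-demo/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion");
    // Expect at least .snapshotinfo and data.manifest, as in the verification log above.
    for (FileStatus status : fs.listStatus(snapshotDir)) {
      System.out.println(status.getPath());
    }
  }
}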
2024-12-05T12:04:06,060 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-05T12:04:06,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-05T12:04:06,066 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, UNASSIGN}] 2024-12-05T12:04:06,067 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, UNASSIGN 2024-12-05T12:04:06,068 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, UNASSIGN 2024-12-05T12:04:06,069 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=854c42f125a98370341f93094bde91b0, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:06,069 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=b528516e028fdfdc1aaa2ac017ae9a25, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:06,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, UNASSIGN because future has completed 2024-12-05T12:04:06,071 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:06,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:04:06,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, UNASSIGN because future has completed 2024-12-05T12:04:06,072 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:06,072 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 854c42f125a98370341f93094bde91b0, server=80666a11a09f,42305,1733400131012}] 
2024-12-05T12:04:06,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T12:04:06,225 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:04:06,225 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:06,225 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing b528516e028fdfdc1aaa2ac017ae9a25, disabling compactions & flushes 2024-12-05T12:04:06,225 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:04:06,225 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:04:06,225 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. after waiting 0 ms 2024-12-05T12:04:06,225 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 2024-12-05T12:04:06,231 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-05T12:04:06,232 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:06,232 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25. 
2024-12-05T12:04:06,232 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for b528516e028fdfdc1aaa2ac017ae9a25: Waiting for close lock at 1733400246225Running coprocessor pre-close hooks at 1733400246225Disabling compacts and flushes for region at 1733400246225Disabling writes for close at 1733400246225Writing region close event to WAL at 1733400246226 (+1 ms)Running coprocessor post-close hooks at 1733400246232 (+6 ms)Closed at 1733400246232 2024-12-05T12:04:06,235 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:04:06,235 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 854c42f125a98370341f93094bde91b0 2024-12-05T12:04:06,235 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:06,235 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 854c42f125a98370341f93094bde91b0, disabling compactions & flushes 2024-12-05T12:04:06,235 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:04:06,235 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:04:06,235 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. after waiting 0 ms 2024-12-05T12:04:06,235 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 
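[Editorial note] The disable traced above fans out master-side into DisableTableProcedure, CloseTableRegionsProcedure, per-region TransitRegionStateProcedure (UNASSIGN) and CloseRegionProcedure. From a client the whole flow is one blocking Admin call; a minimal sketch, assuming a reachable cluster configuration on the classpath, with the table name taken from the log.

// Hedged sketch of the client side of the disable logged here.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // disableTable throws TableNotEnabledException if the table is already disabled,
      // so guard the call; it returns only once the master-side procedure has finished.
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}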
2024-12-05T12:04:06,236 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=b528516e028fdfdc1aaa2ac017ae9a25, regionState=CLOSED 2024-12-05T12:04:06,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:06,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-05T12:04:06,250 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-05T12:04:06,251 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure b528516e028fdfdc1aaa2ac017ae9a25, server=80666a11a09f,42305,1733400131012 in 177 msec 2024-12-05T12:04:06,251 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:06,251 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0. 2024-12-05T12:04:06,251 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 854c42f125a98370341f93094bde91b0: Waiting for close lock at 1733400246235Running coprocessor pre-close hooks at 1733400246235Disabling compacts and flushes for region at 1733400246235Disabling writes for close at 1733400246235Writing region close event to WAL at 1733400246238 (+3 ms)Running coprocessor post-close hooks at 1733400246251 (+13 ms)Closed at 1733400246251 2024-12-05T12:04:06,252 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=b528516e028fdfdc1aaa2ac017ae9a25, UNASSIGN in 185 msec 2024-12-05T12:04:06,253 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 854c42f125a98370341f93094bde91b0 2024-12-05T12:04:06,254 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=854c42f125a98370341f93094bde91b0, regionState=CLOSED 2024-12-05T12:04:06,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 854c42f125a98370341f93094bde91b0, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:06,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-05T12:04:06,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 854c42f125a98370341f93094bde91b0, 
server=80666a11a09f,42305,1733400131012 in 184 msec 2024-12-05T12:04:06,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-05T12:04:06,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=854c42f125a98370341f93094bde91b0, UNASSIGN in 192 msec 2024-12-05T12:04:06,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-05T12:04:06,263 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 199 msec 2024-12-05T12:04:06,264 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400246264"}]},"ts":"1733400246264"} 2024-12-05T12:04:06,266 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-05T12:04:06,266 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-05T12:04:06,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 214 msec 2024-12-05T12:04:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-05T12:04:06,379 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:04:06,383 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,390 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,393 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,397 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,405 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:04:06,405 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:04:06,405 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0 2024-12-05T12:04:06,408 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/recovered.edits] 2024-12-05T12:04:06,408 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/recovered.edits] 2024-12-05T12:04:06,408 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/recovered.edits] 2024-12-05T12:04:06,416 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_ 2024-12-05T12:04:06,416 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:04:06,416 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/cf/540ad97146874d80af9cb7c5229f6232_SeqId_4_.d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:04:06,419 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/recovered.edits/10.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0/recovered.edits/10.seqid 2024-12-05T12:04:06,420 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/recovered.edits/10.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25/recovered.edits/10.seqid 2024-12-05T12:04:06,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/recovered.edits/6.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66/recovered.edits/6.seqid 2024-12-05T12:04:06,420 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/854c42f125a98370341f93094bde91b0 2024-12-05T12:04:06,420 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/b528516e028fdfdc1aaa2ac017ae9a25 2024-12-05T12:04:06,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportFileSystemStateWithSplitRegion/d7a3a0fac78c2c670fa3e1a1d368bf66 2024-12-05T12:04:06,420 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-05T12:04:06,423 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37629 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-05T12:04:06,435 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from 
hbase:meta 2024-12-05T12:04:06,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-05T12:04:06,441 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,441 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-05T12:04:06,442 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400246441"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:06,442 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400246441"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:06,442 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400246441"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:06,442 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T12:04:06,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T12:04:06,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T12:04:06,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-05T12:04:06,447 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 
2024-12-05T12:04:06,447 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d7a3a0fac78c2c670fa3e1a1d368bf66, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400143878.d7a3a0fac78c2c670fa3e1a1d368bf66.', STARTKEY => '', ENDKEY => ''}, {ENCODED => b528516e028fdfdc1aaa2ac017ae9a25, NAME => 'testExportFileSystemStateWithSplitRegion,,1733400150584.b528516e028fdfdc1aaa2ac017ae9a25.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 854c42f125a98370341f93094bde91b0, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733400150584.854c42f125a98370341f93094bde91b0.', STARTKEY => '5', ENDKEY => ''}] 2024-12-05T12:04:06,448 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-05T12:04:06,448 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400246448"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,451 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:06,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:06,451 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:06,452 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-05T12:04:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 
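[Editorial note] The DeleteTableProcedure above does not remove data outright: HFileArchiver moves each region directory under archive/data/default/... and the region rows are deleted from hbase:meta before the table state itself is dropped. Client-side, the teardown of an already-disabled table is a single call; a minimal, hedged sketch assuming the same table name.

// Hedged sketch of the client side of the delete logged here.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // deleteTable requires the table to be disabled (as it is at this point in the log)
      // and blocks until the DeleteTableProcedure, including the archive step, completes.
      if (admin.tableExists(table)) {
        admin.deleteTable(table);
      }
    }
  }
}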
2024-12-05T12:04:06,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:06,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:06,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:06,454 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:06,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 70 msec 2024-12-05T12:04:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-05T12:04:06,559 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,560 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:04:06,560 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T12:04:06,564 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400246564"}]},"ts":"1733400246564"} 2024-12-05T12:04:06,567 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-05T12:04:06,567 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-05T12:04:06,571 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-05T12:04:06,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, UNASSIGN}] 2024-12-05T12:04:06,575 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, UNASSIGN 2024-12-05T12:04:06,575 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, UNASSIGN 2024-12-05T12:04:06,576 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=f4bc653ea89173afedeec879d98060f1, regionState=CLOSING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:06,576 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=446097405ca8c47d8ea55b39855ba137, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:06,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, UNASSIGN because future has completed 2024-12-05T12:04:06,580 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:06,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:06,581 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=80666a11a09f,42305,1733400131012, table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-05T12:04:06,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, UNASSIGN because future has completed 2024-12-05T12:04:06,582 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:06,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:04:06,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T12:04:06,734 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close f4bc653ea89173afedeec879d98060f1 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing f4bc653ea89173afedeec879d98060f1, disabling compactions & flushes 2024-12-05T12:04:06,735 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. after waiting 0 ms 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:04:06,735 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:04:06,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:06,736 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 446097405ca8c47d8ea55b39855ba137, disabling compactions & flushes 2024-12-05T12:04:06,736 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 
2024-12-05T12:04:06,736 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:04:06,736 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. after waiting 0 ms 2024-12-05T12:04:06,736 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:04:06,743 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:06,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:06,744 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137. 2024-12-05T12:04:06,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 446097405ca8c47d8ea55b39855ba137: Waiting for close lock at 1733400246735Running coprocessor pre-close hooks at 1733400246736 (+1 ms)Disabling compacts and flushes for region at 1733400246736Disabling writes for close at 1733400246736Writing region close event to WAL at 1733400246738 (+2 ms)Running coprocessor post-close hooks at 1733400246744 (+6 ms)Closed at 1733400246744 2024-12-05T12:04:06,747 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 446097405ca8c47d8ea55b39855ba137 2024-12-05T12:04:06,748 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=446097405ca8c47d8ea55b39855ba137, regionState=CLOSED 2024-12-05T12:04:06,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:06,763 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:06,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-12-05T12:04:06,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 446097405ca8c47d8ea55b39855ba137, server=80666a11a09f,42305,1733400131012 in 176 msec 2024-12-05T12:04:06,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=446097405ca8c47d8ea55b39855ba137, UNASSIGN in 191 msec 2024-12-05T12:04:06,766 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:06,767 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1. 2024-12-05T12:04:06,767 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for f4bc653ea89173afedeec879d98060f1: Waiting for close lock at 1733400246735Running coprocessor pre-close hooks at 1733400246735Disabling compacts and flushes for region at 1733400246735Disabling writes for close at 1733400246735Writing region close event to WAL at 1733400246736 (+1 ms)Running coprocessor post-close hooks at 1733400246766 (+30 ms)Closed at 1733400246766 2024-12-05T12:04:06,769 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed f4bc653ea89173afedeec879d98060f1 2024-12-05T12:04:06,769 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=f4bc653ea89173afedeec879d98060f1, regionState=CLOSED 2024-12-05T12:04:06,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:06,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-05T12:04:06,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure f4bc653ea89173afedeec879d98060f1, server=80666a11a09f,37629,1733400130958 in 192 msec 2024-12-05T12:04:06,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-05T12:04:06,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=f4bc653ea89173afedeec879d98060f1, UNASSIGN in 201 msec 2024-12-05T12:04:06,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-05T12:04:06,780 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 206 msec 2024-12-05T12:04:06,781 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400246781"}]},"ts":"1733400246781"} 2024-12-05T12:04:06,784 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-05T12:04:06,784 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-05T12:04:06,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 225 msec 2024-12-05T12:04:06,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-05T12:04:06,878 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-05T12:04:06,879 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,882 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,883 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,888 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,892 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1 2024-12-05T12:04:06,892 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137 2024-12-05T12:04:06,895 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/recovered.edits] 2024-12-05T12:04:06,895 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/recovered.edits] 2024-12-05T12:04:06,902 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/cf/79bc4769f851412fb9385fd28cade0d2 2024-12-05T12:04:06,902 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/cf/250c1b1506e848e4b3cae25e6184e3e1 2024-12-05T12:04:06,909 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,911 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137/recovered.edits/9.seqid 2024-12-05T12:04:06,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:06,911 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/recovered.edits/9.seqid to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1/recovered.edits/9.seqid
2024-12-05T12:04:06,911 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF
2024-12-05T12:04:06,911 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF
2024-12-05T12:04:06,911 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF
2024-12-05T12:04:06,911 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/446097405ca8c47d8ea55b39855ba137
2024-12-05T12:04:06,914 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSplitRegion/f4bc653ea89173afedeec879d98060f1
2024-12-05T12:04:06,914 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions
2024-12-05T12:04:06,915 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c
2024-12-05T12:04:06,916 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf]
2024-12-05T12:04:06,917 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,917 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-05T12:04:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-05T12:04:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-05T12:04:06,917 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data null
2024-12-05T12:04:06,917 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty
2024-12-05T12:04:06,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-05T12:04:06,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44
2024-12-05T12:04:06,921 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241205aed605eefcd2475cbbf0c6f7999361e5_446097405ca8c47d8ea55b39855ba137
2024-12-05T12:04:06,923 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e202412052507399ea5214d17b606414f4859c291_f4bc653ea89173afedeec879d98060f1
2024-12-05T12:04:06,924 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c
2024-12-05T12:04:06,927 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,931 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta
2024-12-05T12:04:06,934 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor.
2024-12-05T12:04:06,936 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,936 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states.
2024-12-05T12:04:06,937 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400246936"}]},"ts":"9223372036854775807"}
2024-12-05T12:04:06,937 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400246936"}]},"ts":"9223372036854775807"}
2024-12-05T12:04:06,941 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META
2024-12-05T12:04:06,942 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f4bc653ea89173afedeec879d98060f1, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733400139725.f4bc653ea89173afedeec879d98060f1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 446097405ca8c47d8ea55b39855ba137, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733400139725.446097405ca8c47d8ea55b39855ba137.', STARTKEY => '1', ENDKEY => ''}]
2024-12-05T12:04:06,942 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted.
2024-12-05T12:04:06,942 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400246942"}]},"ts":"9223372036854775807"}
2024-12-05T12:04:06,945 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META
2024-12-05T12:04:06,946 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:06,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 67 msec
2024-12-05T12:04:07,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44
2024-12-05T12:04:07,028 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:07,029 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed
2024-12-05T12:04:07,044 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED
2024-12-05T12:04:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:07,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED
2024-12-05T12:04:07,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:07,053 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED
2024-12-05T12:04:07,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion
2024-12-05T12:04:07,076 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=757 (was 719) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:34316 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:53392 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_223988048_1 at /127.0.0.1:53364 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1405 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:38805 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:44558 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125119) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38805 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 780) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=467 (was 595), ProcessCount=19 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=597 (was 6312) 2024-12-05T12:04:07,077 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=757 is superior to 500 2024-12-05T12:04:07,092 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=757, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=467, ProcessCount=19, AvailableMemoryMB=597 2024-12-05T12:04:07,093 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=757 is superior to 500 2024-12-05T12:04:07,094 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:04:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:07,096 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:04:07,097 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-05T12:04:07,097 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:04:07,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T12:04:07,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741911_1087 (size=442) 2024-12-05T12:04:07,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741911_1087 (size=442) 2024-12-05T12:04:07,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741911_1087 (size=442) 2024-12-05T12:04:07,107 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5330e26b609ebe4f4739554721614515, NAME => 'testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:07,107 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e8db6311889743f955ba06d96d2d3405, NAME => 'testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:07,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741912_1088 (size=67) 2024-12-05T12:04:07,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741912_1088 (size=67) 2024-12-05T12:04:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741912_1088 (size=67) 2024-12-05T12:04:07,117 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:07,117 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing e8db6311889743f955ba06d96d2d3405, disabling compactions & flushes 2024-12-05T12:04:07,117 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,117 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,117 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. after waiting 0 ms 2024-12-05T12:04:07,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,118 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 
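For orientation: the create request above asks for a MOB-enabled table (MOB_THRESHOLD => '0', so every cell of 'cf' is treated as a MOB, VERSIONS => '1') with one split point at '1', which is why exactly two regions are initialized during CREATE_TABLE_WRITE_FS_LAYOUT, one covering ['', '1') and one covering ['1', ''). A minimal client-side sketch of an equivalent create call, assuming the HBase 2.x+ Java client and a reachable cluster; the class name and configuration source are illustrative, not taken from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // Column family 'cf': MOB enabled with threshold 0 and a single version,
      // mirroring the descriptor echoed in the log above.
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split point at '1' yields the two regions seen above.
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table.build(), splits);
    }
  }
}
```

The split key is what produces the two RegionOpenAndInit pool workers seen above; without it the table would come up with a single region.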
2024-12-05T12:04:07,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for e8db6311889743f955ba06d96d2d3405: Waiting for close lock at 1733400247117Disabling compacts and flushes for region at 1733400247117Disabling writes for close at 1733400247118 (+1 ms)Writing region close event to WAL at 1733400247118Closed at 1733400247118 2024-12-05T12:04:07,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741913_1089 (size=67) 2024-12-05T12:04:07,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741913_1089 (size=67) 2024-12-05T12:04:07,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741913_1089 (size=67) 2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 5330e26b609ebe4f4739554721614515, disabling compactions & flushes 2024-12-05T12:04:07,122 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. after waiting 0 ms 2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,122 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
2024-12-05T12:04:07,122 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5330e26b609ebe4f4739554721614515: Waiting for close lock at 1733400247122Disabling compacts and flushes for region at 1733400247122Disabling writes for close at 1733400247122Writing region close event to WAL at 1733400247122Closed at 1733400247122 2024-12-05T12:04:07,124 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:04:07,125 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733400247124"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400247124"}]},"ts":"1733400247124"} 2024-12-05T12:04:07,125 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733400247124"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400247124"}]},"ts":"1733400247124"} 2024-12-05T12:04:07,128 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:04:07,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:04:07,130 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400247129"}]},"ts":"1733400247129"} 2024-12-05T12:04:07,132 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-05T12:04:07,132 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:04:07,134 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:04:07,134 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:04:07,134 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:04:07,134 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:04:07,135 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, ASSIGN}] 2024-12-05T12:04:07,136 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, ASSIGN 2024-12-05T12:04:07,136 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, ASSIGN 2024-12-05T12:04:07,137 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:04:07,137 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, ASSIGN; state=OFFLINE, location=80666a11a09f,37629,1733400130958; forceNewPlan=false, retain=false 2024-12-05T12:04:07,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T12:04:07,287 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
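The repeated "Checking to see if procedure is done pid=45" entries are the client polling the master until the CreateTableProcedure completes. Application code rarely polls by hand, since the synchronous Admin.createTable call blocks until the procedure finishes, but when a table is created by another process an explicit wait looks roughly like this hedged sketch (class name and timeout are illustrative):

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
  private WaitForTableSketch() {}

  // Poll until every region of the table is assigned and online, or the timeout expires.
  public static boolean waitUntilAvailable(Admin admin, TableName tn, long timeoutMs)
      throws Exception {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (System.nanoTime() < deadline) {
      if (admin.isTableAvailable(tn)) {  // true once all regions are open
        return true;
      }
      Thread.sleep(200);                 // modest pause between master round-trips
    }
    return false;
  }
}
```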
2024-12-05T12:04:07,288 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e8db6311889743f955ba06d96d2d3405, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:07,288 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=5330e26b609ebe4f4739554721614515, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:07,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, ASSIGN because future has completed 2024-12-05T12:04:07,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:07,291 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, ASSIGN because future has completed 2024-12-05T12:04:07,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:04:07,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T12:04:07,444 DEBUG [RSProcedureDispatcher-pool-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-05T12:04:07,446 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51697, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:04:07,447 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => e8db6311889743f955ba06d96d2d3405, NAME => 'testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:04:07,447 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. service=AccessControlService 2024-12-05T12:04:07,447 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:04:07,448 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,448 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:07,448 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,448 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,450 INFO [StoreOpener-e8db6311889743f955ba06d96d2d3405-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,451 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,451 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 5330e26b609ebe4f4739554721614515, NAME => 'testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:04:07,451 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. service=AccessControlService 2024-12-05T12:04:07,451 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
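The StoreOpener entries echo the effective per-family configuration as each region opens (cache config, compaction config, MOB settings, store file tracker). A small sketch for reading that configuration back from the client, assuming an Admin handle like the ones above:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public final class DescribeMobFamilySketch {
  private DescribeMobFamilySketch() {}

  // Fetch the table descriptor and print the MOB-related settings of family 'cf'.
  public static void printMobSettings(Admin admin) throws Exception {
    TableDescriptor td =
        admin.getDescriptor(TableName.valueOf("testtb-testExportWithTargetName"));
    ColumnFamilyDescriptor cf = td.getColumnFamily(Bytes.toBytes("cf"));
    System.out.println("MOB enabled: " + cf.isMobEnabled()
        + ", MOB threshold: " + cf.getMobThreshold()
        + ", max versions: " + cf.getMaxVersions());
  }
}
```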
2024-12-05T12:04:07,452 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,452 INFO [StoreOpener-e8db6311889743f955ba06d96d2d3405-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8db6311889743f955ba06d96d2d3405 columnFamilyName cf 2024-12-05T12:04:07,452 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:07,452 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,452 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,453 DEBUG [StoreOpener-e8db6311889743f955ba06d96d2d3405-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:07,453 INFO [StoreOpener-5330e26b609ebe4f4739554721614515-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,454 INFO [StoreOpener-e8db6311889743f955ba06d96d2d3405-1 {}] regionserver.HStore(327): Store=e8db6311889743f955ba06d96d2d3405/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:07,454 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,456 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,456 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,457 INFO [StoreOpener-5330e26b609ebe4f4739554721614515-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5330e26b609ebe4f4739554721614515 columnFamilyName cf 2024-12-05T12:04:07,457 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,457 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,457 DEBUG [StoreOpener-5330e26b609ebe4f4739554721614515-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:07,458 INFO [StoreOpener-5330e26b609ebe4f4739554721614515-1 {}] regionserver.HStore(327): Store=5330e26b609ebe4f4739554721614515/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:07,459 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,459 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,459 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,460 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,460 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,460 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,462 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 
5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,463 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:07,464 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened e8db6311889743f955ba06d96d2d3405; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65765212, jitterRate=-0.02002197504043579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:07,464 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,465 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for e8db6311889743f955ba06d96d2d3405: Running coprocessor pre-open hook at 1733400247448Writing region info on filesystem at 1733400247448Initializing all the Stores at 1733400247449 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400247449Cleaning up temporary data from old regions at 1733400247457 (+8 ms)Running coprocessor post-open hooks at 1733400247464 (+7 ms)Region opened successfully at 1733400247465 (+1 ms) 2024-12-05T12:04:07,466 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405., pid=48, masterSystemTime=1733400247442 2024-12-05T12:04:07,467 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:07,467 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 5330e26b609ebe4f4739554721614515; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59401522, jitterRate=-0.11484834551811218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:07,467 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,468 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 5330e26b609ebe4f4739554721614515: Running coprocessor pre-open hook at 1733400247452Writing region 
info on filesystem at 1733400247452Initializing all the Stores at 1733400247453 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400247453Cleaning up temporary data from old regions at 1733400247461 (+8 ms)Running coprocessor post-open hooks at 1733400247468 (+7 ms)Region opened successfully at 1733400247468 2024-12-05T12:04:07,469 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515., pid=49, masterSystemTime=1733400247444 2024-12-05T12:04:07,469 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,469 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,469 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e8db6311889743f955ba06d96d2d3405, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:07,472 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,472 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
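At this point both regions are open, e8db6311889743f955ba06d96d2d3405 on 80666a11a09f,37629 and 5330e26b609ebe4f4739554721614515 on 80666a11a09f,42913. The same placement is visible from a client through RegionLocator; a minimal sketch, assuming an open Connection:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionPlacementSketch {
  private RegionPlacementSketch() {}

  // Print every region of the table and the server currently hosting it.
  public static void printPlacement(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}
```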
2024-12-05T12:04:07,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:07,473 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=5330e26b609ebe4f4739554721614515, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:07,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:04:07,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-05T12:04:07,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958 in 185 msec 2024-12-05T12:04:07,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-05T12:04:07,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, ASSIGN in 344 msec 2024-12-05T12:04:07,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845 in 186 msec 2024-12-05T12:04:07,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-12-05T12:04:07,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, ASSIGN in 345 msec 2024-12-05T12:04:07,483 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:04:07,483 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400247483"}]},"ts":"1733400247483"} 2024-12-05T12:04:07,485 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-05T12:04:07,486 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:04:07,486 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-05T12:04:07,489 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 
2024-12-05T12:04:07,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:07,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:07,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:07,525 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:07,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:07,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:07,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:07,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:07,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 440 msec 2024-12-05T12:04:07,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-05T12:04:07,728 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T12:04:07,728 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:07,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-05T12:04:07,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
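The grant written during CREATE_TABLE_POST_OPERATION (jenkins: RWXCA) is pushed to every region server through the /hbase/acl znode, which is what the NodeChildrenChanged events and the zk-permission-watcher cache updates above show. A hedged sketch for reading the effective permissions back via AccessControlClient, using the table-regex overload, which declares Throwable:

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class AclReadbackSketch {
  private AclReadbackSketch() {}

  // List the users and actions granted on the exported table.
  public static void printAcl(Connection conn) throws Throwable {
    List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, "testtb-testExportWithTargetName");
    for (UserPermission p : perms) {
      System.out.println(p);  // the owner grant "jenkins: RWXCA" logged above should show up here
    }
  }
}
```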
2024-12-05T12:04:07,732 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:07,734 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:07,743 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:07,748 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:07,750 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55488, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:07,755 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:07,759 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T12:04:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400247759 (current time:1733400247759). 2024-12-05T12:04:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-05T12:04:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:07,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14e84742, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:07,760 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: 
cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fd327b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:07,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,762 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:07,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53c3e56b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:07,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:07,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:07,766 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56454, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:07,767 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:04:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,767 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4427800a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:07,769 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:07,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:07,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:07,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ffaa9e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:07,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:07,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,770 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39946, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:07,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@493065a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:07,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:07,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:07,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:07,773 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:07,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:07,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:07,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:07,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:04:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:07,778 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-05T12:04:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
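Aside: the entries above show the master validating a snapshot request for testtb-testExportWithTargetName (ACL read, then "No existing snapshot, attempting snapshot...") before storing SnapshotProcedure pid=50. The sketch below is not taken from the test source; it only illustrates, under the assumption of the standard 2.x+ client API visible in the stack traces, how a client would issue such a FLUSH snapshot request through Admin. The snapshot and table names simply mirror the ones in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Sends the snapshot request to the master, which runs it as a snapshot
      // procedure like the pid=50 one visible in the surrounding log entries.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}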
2024-12-05T12:04:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T12:04:07,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-05T12:04:07,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T12:04:07,781 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:07,782 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:07,785 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:07,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741914_1090 (size=167) 2024-12-05T12:04:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741914_1090 (size=167) 2024-12-05T12:04:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741914_1090 (size=167) 2024-12-05T12:04:07,792 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:04:07,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405}] 2024-12-05T12:04:07,793 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,793 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,887 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T12:04:07,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-05T12:04:07,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 5330e26b609ebe4f4739554721614515: 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for e8db6311889743f955ba06d96d2d3405: 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. for emptySnaptb0-testExportWithTargetName completed. 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. for emptySnaptb0-testExportWithTargetName completed. 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:07,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:07,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:07,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:07,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:07,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741916_1092 (size=70) 2024-12-05T12:04:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741916_1092 (size=70) 2024-12-05T12:04:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741916_1092 (size=70) 2024-12-05T12:04:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741915_1091 (size=70) 2024-12-05T12:04:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741915_1091 (size=70) 2024-12-05T12:04:07,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741915_1091 (size=70) 2024-12-05T12:04:07,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:07,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-05T12:04:07,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
2024-12-05T12:04:07,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-05T12:04:07,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-05T12:04:07,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-05T12:04:07,955 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,956 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:07,956 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:07,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 in 164 msec 2024-12-05T12:04:07,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-05T12:04:07,964 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:07,964 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 in 164 msec 2024-12-05T12:04:07,965 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:07,966 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:07,966 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:07,966 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:07,967 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:04:07,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741917_1093 (size=62) 2024-12-05T12:04:07,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741917_1093 (size=62) 2024-12-05T12:04:07,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741917_1093 (size=62) 2024-12-05T12:04:07,976 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:07,976 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:07,977 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741918_1094 (size=649) 2024-12-05T12:04:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741918_1094 (size=649) 2024-12-05T12:04:07,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741918_1094 (size=649) 2024-12-05T12:04:07,995 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:08,007 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:08,008 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:08,010 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:08,010 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-05T12:04:08,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 232 msec 2024-12-05T12:04:08,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-05T12:04:08,099 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T12:04:08,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:08,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:08,113 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:08,117 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-05T12:04:08,117 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
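Aside: the "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what the region server emits when a mutation arrives with durability SKIP_WAL. A minimal sketch of a put that would trigger that warning is shown below; the row and value are assumptions, only the table and column family names mirror the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                       // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      // SKIP_WAL is what produces the "with WAL disabled" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}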
2024-12-05T12:04:08,117 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:08,120 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:08,127 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:08,136 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-05T12:04:08,141 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T12:04:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400248141 (current time:1733400248141). 2024-12-05T12:04:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-05T12:04:08,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@796de440, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:08,144 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:08,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:08,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:08,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d69b442, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-05T12:04:08,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:08,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:08,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,147 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47804, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:08,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e74e490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:08,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:08,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:08,152 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36426, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:08,154 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:04:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,154 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49369060, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:08,172 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:08,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:08,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:08,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@608c18d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:08,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:08,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:08,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,174 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:08,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ed43df4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:08,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:08,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:08,179 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36440, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:08,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:08,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:08,182 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:08,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:04:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:08,184 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-05T12:04:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
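Aside: the same validation sequence now repeats for a second snapshot, snaptb0-testExportWithTargetName, and the repeated "Checking to see if procedure is done pid=53" entries that follow correspond to the client polling the master for completion. A hedged sketch of that submit-and-poll pattern, assuming the asynchronous Admin methods of the 2.x+ client API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    SnapshotDescription snapshot = new SnapshotDescription(
        "snaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"),
        SnapshotType.FLUSH); // matches "type=FLUSH" in the log
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      admin.snapshotAsync(snapshot); // submit without blocking
      // Polling the master is what drives the repeated
      // "Checking to see if procedure is done" entries in the log.
      while (!admin.isSnapshotFinished(snapshot)) {
        Thread.sleep(100);
      }
    }
  }
}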
2024-12-05T12:04:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-05T12:04:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-05T12:04:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T12:04:08,188 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:08,189 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:08,192 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:08,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741919_1095 (size=162) 2024-12-05T12:04:08,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741919_1095 (size=162) 2024-12-05T12:04:08,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741919_1095 (size=162) 2024-12-05T12:04:08,227 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:04:08,227 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405}] 2024-12-05T12:04:08,228 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:08,228 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:08,298 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T12:04:08,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-05T12:04:08,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-05T12:04:08,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:08,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:08,380 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing e8db6311889743f955ba06d96d2d3405 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-05T12:04:08,380 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 5330e26b609ebe4f4739554721614515 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-05T12:04:08,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 is 71, key is 1aadf309b4b0b88bca4079a7670db6fb/cf:q/1733400248110/Put/seqid=0 2024-12-05T12:04:08,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 is 71, key is 01e2b5e681578ce5e163fa912bdd1dea/cf:q/1733400248111/Put/seqid=0 2024-12-05T12:04:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741921_1097 (size=8102) 2024-12-05T12:04:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741921_1097 (size=8102) 2024-12-05T12:04:08,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741921_1097 (size=8102) 2024-12-05T12:04:08,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:08,429 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:08,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/.tmp/cf/a0b07f6c133549778217935ac9fc98db, store: [table=testtb-testExportWithTargetName family=cf region=e8db6311889743f955ba06d96d2d3405] 2024-12-05T12:04:08,432 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/.tmp/cf/a0b07f6c133549778217935ac9fc98db is 208, key is 1874b234aa0beaff56ca7dac2d8ced188/cf:q/1733400248110/Put/seqid=0 2024-12-05T12:04:08,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741920_1096 (size=5172) 2024-12-05T12:04:08,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741920_1096 (size=5172) 2024-12-05T12:04:08,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741920_1096 (size=5172) 2024-12-05T12:04:08,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:08,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741922_1098 (size=14747) 2024-12-05T12:04:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741922_1098 (size=14747) 2024-12-05T12:04:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741922_1098 (size=14747) 2024-12-05T12:04:08,448 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/.tmp/cf/a0b07f6c133549778217935ac9fc98db 2024-12-05T12:04:08,465 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 2024-12-05T12:04:08,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/.tmp/cf/a0b07f6c133549778217935ac9fc98db as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db 2024-12-05T12:04:08,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/.tmp/cf/87c639b6d3304c39bc8a74dead0da691, store: [table=testtb-testExportWithTargetName family=cf region=5330e26b609ebe4f4739554721614515] 2024-12-05T12:04:08,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/.tmp/cf/87c639b6d3304c39bc8a74dead0da691 is 208, key is 0c743ddb6fe18eb32917c7183384f1216/cf:q/1733400248111/Put/seqid=0 2024-12-05T12:04:08,474 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db, entries=46, sequenceid=6, filesize=14.4 K 2024-12-05T12:04:08,475 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for e8db6311889743f955ba06d96d2d3405 in 96ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:08,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for e8db6311889743f955ba06d96d2d3405: 2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. for snaptb0-testExportWithTargetName completed. 
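Aside: the flushes above go through mob.DefaultMobStoreFlusher and land extra files under mobdir/, which means the 'cf' family of this test table is MOB-enabled. A sketch of how such a table could be declared is shown below; the MOB threshold and the idea of creating the table here are assumptions for illustration, not taken from the test setup.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)   // MOB cells are written under mobdir/, as seen in the log
        .setMobThreshold(0L)   // assumed threshold; 0 forces every cell into MOB files
        .build();
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(cf)
          .build());
    }
  }
}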
2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db] hfiles 2024-12-05T12:04:08,476 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db for snapshot=snaptb0-testExportWithTargetName 2024-12-05T12:04:08,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741923_1099 (size=6118) 2024-12-05T12:04:08,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741923_1099 (size=6118) 2024-12-05T12:04:08,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741923_1099 (size=6118) 2024-12-05T12:04:08,481 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/.tmp/cf/87c639b6d3304c39bc8a74dead0da691 2024-12-05T12:04:08,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/.tmp/cf/87c639b6d3304c39bc8a74dead0da691 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691 2024-12-05T12:04:08,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741924_1100 (size=109) 2024-12-05T12:04:08,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741924_1100 (size=109) 2024-12-05T12:04:08,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 
2024-12-05T12:04:08,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741924_1100 (size=109) 2024-12-05T12:04:08,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-05T12:04:08,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-05T12:04:08,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:08,493 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:08,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691, entries=4, sequenceid=6, filesize=6.0 K 2024-12-05T12:04:08,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8db6311889743f955ba06d96d2d3405 in 267 msec 2024-12-05T12:04:08,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 5330e26b609ebe4f4739554721614515 in 117ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:08,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 5330e26b609ebe4f4739554721614515: 2024-12-05T12:04:08,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. for snaptb0-testExportWithTargetName completed. 2024-12-05T12:04:08,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-05T12:04:08,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:08,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691] hfiles 2024-12-05T12:04:08,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691 for snapshot=snaptb0-testExportWithTargetName 2024-12-05T12:04:08,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741925_1101 (size=109) 2024-12-05T12:04:08,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741925_1101 (size=109) 2024-12-05T12:04:08,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741925_1101 (size=109) 2024-12-05T12:04:08,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
2024-12-05T12:04:08,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-05T12:04:08,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-05T12:04:08,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:08,506 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:08,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T12:04:08,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=54, resume processing ppid=53 2024-12-05T12:04:08,510 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:08,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5330e26b609ebe4f4739554721614515 in 280 msec 2024-12-05T12:04:08,511 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:08,512 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
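[Editor's note] The entries above show the master's SnapshotProcedure (pid=53) driving the per-region SnapshotRegionProcedures and the MOB-region manifest for the FLUSH-type snapshot snaptb0-testExportWithTargetName. For orientation only, this is roughly how such a snapshot is requested from the client side; a minimal sketch using the public Admin API, not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Takes a snapshot of an enabled table; this is the flush-type snapshot
      // matching "snapshot={ ss=snaptb0-testExportWithTargetName ... type=FLUSH }" above.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}

The call returns once the master reports the SnapshotProcedure finished, which corresponds to the "Operation: SNAPSHOT ... completed" entry further down in this log.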
2024-12-05T12:04:08,512 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:08,512 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:08,513 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515] hfiles 2024-12-05T12:04:08,514 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:08,514 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 2024-12-05T12:04:08,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741926_1102 (size=293) 2024-12-05T12:04:08,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741926_1102 (size=293) 2024-12-05T12:04:08,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741926_1102 (size=293) 2024-12-05T12:04:08,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:08,525 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-05T12:04:08,525 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-05T12:04:08,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741927_1103 (size=959) 2024-12-05T12:04:08,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741927_1103 (size=959) 2024-12-05T12:04:08,536 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741927_1103 (size=959) 2024-12-05T12:04:08,540 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:08,548 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:08,549 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-05T12:04:08,550 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:08,550 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-05T12:04:08,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 365 msec 2024-12-05T12:04:08,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-05T12:04:08,818 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T12:04:08,819 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819 2024-12-05T12:04:08,819 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:08,846 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:08,846 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-05T12:04:08,848 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:04:08,853 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-05T12:04:08,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741929_1105 (size=162) 2024-12-05T12:04:08,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741929_1105 (size=162) 2024-12-05T12:04:08,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741928_1104 (size=959) 2024-12-05T12:04:08,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741929_1105 (size=162) 2024-12-05T12:04:08,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741928_1104 (size=959) 2024-12-05T12:04:08,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741928_1104 (size=959) 2024-12-05T12:04:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741930_1106 (size=154) 2024-12-05T12:04:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741930_1106 (size=154) 2024-12-05T12:04:08,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741930_1106 (size=154) 2024-12-05T12:04:08,883 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:08,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:08,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:08,981 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: 
threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:04:09,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-4804032947865650123.jar 2024-12-05T12:04:09,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-13479662463962444904.jar 2024-12-05T12:04:09,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:09,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:04:09,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:04:09,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:04:09,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:04:09,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:04:09,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:04:09,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:04:09,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:04:09,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:04:09,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:04:09,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:04:09,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:09,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:09,856 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:09,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:09,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:09,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:09,858 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:09,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741931_1107 (size=131440) 2024-12-05T12:04:09,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741931_1107 (size=131440) 2024-12-05T12:04:09,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741931_1107 (size=131440) 2024-12-05T12:04:09,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741932_1108 (size=4188619) 2024-12-05T12:04:09,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741932_1108 (size=4188619) 2024-12-05T12:04:09,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741932_1108 (size=4188619) 2024-12-05T12:04:09,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741933_1109 (size=1323991) 2024-12-05T12:04:09,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741933_1109 (size=1323991) 2024-12-05T12:04:09,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741933_1109 (size=1323991) 2024-12-05T12:04:09,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741934_1110 (size=903934) 2024-12-05T12:04:09,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741934_1110 (size=903934) 2024-12-05T12:04:09,979 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741934_1110 (size=903934) 2024-12-05T12:04:10,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741935_1111 (size=8360360) 2024-12-05T12:04:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741935_1111 (size=8360360) 2024-12-05T12:04:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741935_1111 (size=8360360) 2024-12-05T12:04:10,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741936_1112 (size=1877034) 2024-12-05T12:04:10,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741936_1112 (size=1877034) 2024-12-05T12:04:10,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741936_1112 (size=1877034) 2024-12-05T12:04:10,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741937_1113 (size=77835) 2024-12-05T12:04:10,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741937_1113 (size=77835) 2024-12-05T12:04:10,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741937_1113 (size=77835) 2024-12-05T12:04:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741938_1114 (size=30949) 2024-12-05T12:04:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741938_1114 (size=30949) 2024-12-05T12:04:10,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741938_1114 (size=30949) 2024-12-05T12:04:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741939_1115 (size=1597213) 2024-12-05T12:04:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741939_1115 (size=1597213) 2024-12-05T12:04:10,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741939_1115 (size=1597213) 2024-12-05T12:04:10,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741940_1116 (size=443172) 2024-12-05T12:04:10,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741940_1116 (size=443172) 2024-12-05T12:04:10,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741940_1116 (size=443172) 2024-12-05T12:04:10,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741941_1117 (size=4695811) 2024-12-05T12:04:10,108 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741941_1117 (size=4695811) 2024-12-05T12:04:10,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741941_1117 (size=4695811) 2024-12-05T12:04:10,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741942_1118 (size=232957) 2024-12-05T12:04:10,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741942_1118 (size=232957) 2024-12-05T12:04:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741942_1118 (size=232957) 2024-12-05T12:04:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741943_1119 (size=127628) 2024-12-05T12:04:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741943_1119 (size=127628) 2024-12-05T12:04:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741943_1119 (size=127628) 2024-12-05T12:04:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741944_1120 (size=20406) 2024-12-05T12:04:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741944_1120 (size=20406) 2024-12-05T12:04:10,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741944_1120 (size=20406) 2024-12-05T12:04:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741945_1121 (size=5175431) 2024-12-05T12:04:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741945_1121 (size=5175431) 2024-12-05T12:04:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741945_1121 (size=5175431) 2024-12-05T12:04:10,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741946_1122 (size=217634) 2024-12-05T12:04:10,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741946_1122 (size=217634) 2024-12-05T12:04:10,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741946_1122 (size=217634) 2024-12-05T12:04:10,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741947_1123 (size=1832290) 2024-12-05T12:04:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741947_1123 (size=1832290) 2024-12-05T12:04:10,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741947_1123 (size=1832290) 2024-12-05T12:04:10,236 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741948_1124 (size=6425023) 2024-12-05T12:04:10,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741948_1124 (size=6425023) 2024-12-05T12:04:10,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741948_1124 (size=6425023) 2024-12-05T12:04:10,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741949_1125 (size=322274) 2024-12-05T12:04:10,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741949_1125 (size=322274) 2024-12-05T12:04:10,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741949_1125 (size=322274) 2024-12-05T12:04:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741950_1126 (size=503880) 2024-12-05T12:04:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741950_1126 (size=503880) 2024-12-05T12:04:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741950_1126 (size=503880) 2024-12-05T12:04:10,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0001_000001 (auth:SIMPLE) from 127.0.0.1:56938 2024-12-05T12:04:10,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741951_1127 (size=29229) 2024-12-05T12:04:10,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741951_1127 (size=29229) 2024-12-05T12:04:10,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741951_1127 (size=29229) 2024-12-05T12:04:10,293 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000001/launch_container.sh] 2024-12-05T12:04:10,293 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000001/container_tokens] 2024-12-05T12:04:10,293 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0001/container_1733400137807_0001_01_000001/sysfs] 
2024-12-05T12:04:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741952_1128 (size=24096) 2024-12-05T12:04:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741952_1128 (size=24096) 2024-12-05T12:04:10,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741952_1128 (size=24096) 2024-12-05T12:04:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741953_1129 (size=111872) 2024-12-05T12:04:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741953_1129 (size=111872) 2024-12-05T12:04:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741953_1129 (size=111872) 2024-12-05T12:04:10,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741954_1130 (size=45609) 2024-12-05T12:04:10,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741954_1130 (size=45609) 2024-12-05T12:04:10,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741954_1130 (size=45609) 2024-12-05T12:04:10,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741955_1131 (size=136454) 2024-12-05T12:04:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741955_1131 (size=136454) 2024-12-05T12:04:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741955_1131 (size=136454) 2024-12-05T12:04:10,343 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
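[Editor's note] The long run of addStoredBlock and "using jar" entries above is ExportSnapshot staging its dependency jars for the MapReduce copy job (the inputRoot/outputRoot were logged at 12:04:08,846). As a rough illustration of how that export is normally launched outside this test harness, here is a minimal sketch driving the tool programmatically; -snapshot and -copy-to are the documented options, while the -target rename option and all paths are assumptions inferred from the test name, not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced hfiles to another HDFS
    // root, renaming the snapshot on the destination (hypothetical paths).
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/hbase-export",
        "-target", "testExportWithTargetName",   // assumed rename option
        "-mappers", "4"
    });
    System.exit(rc);
  }
}

The split sizes logged just below ("export split=0 size=14.4 K", etc.) are how the tool balances those hfiles across the map tasks of this job.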
2024-12-05T12:04:10,346 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-05T12:04:10,348 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-05T12:04:10,349 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-05T12:04:10,349 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-05T12:04:10,349 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-05T12:04:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741956_1132 (size=1031) 2024-12-05T12:04:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741956_1132 (size=1031) 2024-12-05T12:04:10,360 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-05T12:04:10,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741956_1132 (size=1031) 2024-12-05T12:04:10,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-05T12:04:10,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:10,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-05T12:04:10,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741957_1133 (size=35) 2024-12-05T12:04:10,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741957_1133 (size=35) 2024-12-05T12:04:10,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741957_1133 (size=35) 2024-12-05T12:04:10,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741958_1134 (size=304001) 2024-12-05T12:04:10,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741958_1134 (size=304001) 2024-12-05T12:04:10,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741958_1134 (size=304001) 2024-12-05T12:04:10,420 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:04:10,420 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:04:11,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:38506 2024-12-05T12:04:11,759 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:04:12,799 WARN [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 0 2024-12-05T12:04:16,817 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:32858 2024-12-05T12:04:17,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741959_1135 (size=349699) 2024-12-05T12:04:17,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741959_1135 (size=349699) 2024-12-05T12:04:17,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741959_1135 (size=349699) 2024-12-05T12:04:19,228 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:41236 2024-12-05T12:04:19,229 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:54924 2024-12-05T12:04:19,437 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e8db6311889743f955ba06d96d2d3405 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:04:19,437 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5330e26b609ebe4f4739554721614515 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:04:19,918 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:54934 2024-12-05T12:04:19,935 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:41238 2024-12-05T12:04:22,276 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:04:26,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741960_1136 (size=14747) 2024-12-05T12:04:26,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741960_1136 (size=14747) 2024-12-05T12:04:26,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741960_1136 (size=14747) 2024-12-05T12:04:26,331 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741962_1138 (size=6118) 2024-12-05T12:04:26,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741962_1138 (size=6118) 2024-12-05T12:04:26,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741962_1138 (size=6118) 2024-12-05T12:04:26,385 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000002/launch_container.sh] 2024-12-05T12:04:26,385 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000002/container_tokens] 2024-12-05T12:04:26,386 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000002/sysfs] 2024-12-05T12:04:26,573 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000004/launch_container.sh] 2024-12-05T12:04:26,573 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000004/container_tokens] 2024-12-05T12:04:26,573 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000004/sysfs] 2024-12-05T12:04:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741963_1139 (size=5172) 2024-12-05T12:04:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741963_1139 (size=5172) 2024-12-05T12:04:27,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741963_1139 (size=5172) 2024-12-05T12:04:27,779 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741964_1140 (size=8102) 2024-12-05T12:04:27,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741964_1140 (size=8102) 2024-12-05T12:04:27,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741964_1140 (size=8102) 2024-12-05T12:04:27,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741961_1137 (size=31744) 2024-12-05T12:04:27,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741961_1137 (size=31744) 2024-12-05T12:04:27,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741961_1137 (size=31744) 2024-12-05T12:04:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741965_1141 (size=465) 2024-12-05T12:04:27,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741965_1141 (size=465) 2024-12-05T12:04:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741965_1141 (size=465) 2024-12-05T12:04:27,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741966_1142 (size=31744) 2024-12-05T12:04:27,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741966_1142 (size=31744) 2024-12-05T12:04:27,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741966_1142 (size=31744) 2024-12-05T12:04:27,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000003/launch_container.sh] 2024-12-05T12:04:27,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000003/container_tokens] 2024-12-05T12:04:27,912 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000003/sysfs] 2024-12-05T12:04:27,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741967_1143 (size=349699) 2024-12-05T12:04:27,915 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741967_1143 (size=349699) 2024-12-05T12:04:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741967_1143 (size=349699) 2024-12-05T12:04:27,929 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:34836 2024-12-05T12:04:27,936 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:34838 2024-12-05T12:04:29,824 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:04:29,826 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:04:29,832 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-05T12:04:29,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:04:29,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:04:29,833 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-05T12:04:29,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-05T12:04:29,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-05T12:04:29,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/testExportWithTargetName 2024-12-05T12:04:29,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-05T12:04:29,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400248819/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-05T12:04:29,842 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-05T12:04:29,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T12:04:29,846 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400269846"}]},"ts":"1733400269846"} 2024-12-05T12:04:29,848 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-05T12:04:29,849 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-05T12:04:29,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-05T12:04:29,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, UNASSIGN}] 2024-12-05T12:04:29,854 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, UNASSIGN 2024-12-05T12:04:29,854 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, UNASSIGN 2024-12-05T12:04:29,855 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=e8db6311889743f955ba06d96d2d3405, regionState=CLOSING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:29,855 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=5330e26b609ebe4f4739554721614515, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:29,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, UNASSIGN because future has completed 2024-12-05T12:04:29,857 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:29,858 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:29,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, UNASSIGN because future has completed 2024-12-05T12:04:29,859 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:29,859 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:04:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T12:04:30,011 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:30,011 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:30,011 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing e8db6311889743f955ba06d96d2d3405, disabling compactions & flushes 2024-12-05T12:04:30,011 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:30,011 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:30,011 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. after waiting 0 ms 2024-12-05T12:04:30,011 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 2024-12-05T12:04:30,012 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:30,012 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:30,012 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 5330e26b609ebe4f4739554721614515, disabling compactions & flushes 2024-12-05T12:04:30,012 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 
2024-12-05T12:04:30,012 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:30,012 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. after waiting 0 ms 2024-12-05T12:04:30,012 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:30,019 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:30,019 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:30,019 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:30,020 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:30,020 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515. 2024-12-05T12:04:30,020 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405. 
2024-12-05T12:04:30,020 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 5330e26b609ebe4f4739554721614515: Waiting for close lock at 1733400270012Running coprocessor pre-close hooks at 1733400270012Disabling compacts and flushes for region at 1733400270012Disabling writes for close at 1733400270012Writing region close event to WAL at 1733400270013 (+1 ms)Running coprocessor post-close hooks at 1733400270019 (+6 ms)Closed at 1733400270020 (+1 ms) 2024-12-05T12:04:30,020 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for e8db6311889743f955ba06d96d2d3405: Waiting for close lock at 1733400270011Running coprocessor pre-close hooks at 1733400270011Disabling compacts and flushes for region at 1733400270011Disabling writes for close at 1733400270011Writing region close event to WAL at 1733400270012 (+1 ms)Running coprocessor post-close hooks at 1733400270019 (+7 ms)Closed at 1733400270020 (+1 ms) 2024-12-05T12:04:30,022 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 5330e26b609ebe4f4739554721614515 2024-12-05T12:04:30,022 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=5330e26b609ebe4f4739554721614515, regionState=CLOSED 2024-12-05T12:04:30,024 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=e8db6311889743f955ba06d96d2d3405, regionState=CLOSED 2024-12-05T12:04:30,024 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:30,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:04:30,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:30,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-05T12:04:30,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 5330e26b609ebe4f4739554721614515, server=80666a11a09f,42913,1733400130845 in 167 msec 2024-12-05T12:04:30,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5330e26b609ebe4f4739554721614515, UNASSIGN in 178 msec 2024-12-05T12:04:30,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-05T12:04:30,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure e8db6311889743f955ba06d96d2d3405, server=80666a11a09f,37629,1733400130958 in 170 msec 2024-12-05T12:04:30,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=59, resume processing ppid=57 2024-12-05T12:04:30,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e8db6311889743f955ba06d96d2d3405, UNASSIGN in 179 msec 2024-12-05T12:04:30,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-05T12:04:30,036 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 185 msec 2024-12-05T12:04:30,037 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400270037"}]},"ts":"1733400270037"} 2024-12-05T12:04:30,039 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-05T12:04:30,040 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-05T12:04:30,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 198 msec 2024-12-05T12:04:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-05T12:04:30,159 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T12:04:30,160 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-05T12:04:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,164 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-05T12:04:30,165 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,168 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-05T12:04:30,171 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515 2024-12-05T12:04:30,171 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:30,173 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/recovered.edits] 2024-12-05T12:04:30,173 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/recovered.edits] 2024-12-05T12:04:30,177 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/cf/87c639b6d3304c39bc8a74dead0da691 2024-12-05T12:04:30,177 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/cf/a0b07f6c133549778217935ac9fc98db 2024-12-05T12:04:30,179 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515/recovered.edits/9.seqid 2024-12-05T12:04:30,179 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405/recovered.edits/9.seqid 2024-12-05T12:04:30,180 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/5330e26b609ebe4f4739554721614515 2024-12-05T12:04:30,180 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithTargetName/e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:30,180 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-05T12:04:30,180 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-05T12:04:30,181 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-05T12:04:30,184 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241205550d85b1be81425bb8e9b0d45f6fd8dd_e8db6311889743f955ba06d96d2d3405 2024-12-05T12:04:30,185 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241205e4c1ec223ce74b09b4082503ef010ec9_5330e26b609ebe4f4739554721614515 2024-12-05T12:04:30,186 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-05T12:04:30,188 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,190 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-05T12:04:30,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,192 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T12:04:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T12:04:30,193 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-05T12:04:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T12:04:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-05T12:04:30,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,194 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-05T12:04:30,194 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400270194"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:30,194 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400270194"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:30,197 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:04:30,197 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5330e26b609ebe4f4739554721614515, NAME => 'testtb-testExportWithTargetName,,1733400247094.5330e26b609ebe4f4739554721614515.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e8db6311889743f955ba06d96d2d3405, NAME => 'testtb-testExportWithTargetName,1,1733400247094.e8db6311889743f955ba06d96d2d3405.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:04:30,197 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
2024-12-05T12:04:30,197 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400270197"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:30,199 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-05T12:04:30,201 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,201 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-05T12:04:30,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-05T12:04:30,203 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-05T12:04:30,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 43 msec 2024-12-05T12:04:30,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-05T12:04:30,308 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-05T12:04:30,308 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-05T12:04:30,315 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-05T12:04:30,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-05T12:04:30,319 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-05T12:04:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-05T12:04:30,344 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=798 (was 757) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41331 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-767458495_1 at /127.0.0.1:34852 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:41331 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:43465 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:34882 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41331 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2199 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-767458495_1 at /127.0.0.1:36090 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:41331 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:36106 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43465 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 130256) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/80666a11a09f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 804) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=694 (was 467) - SystemLoadAverage LEAK? -, ProcessCount=16 (was 19), AvailableMemoryMB=2599 (was 597) - AvailableMemoryMB LEAK? 
- 2024-12-05T12:04:30,345 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-05T12:04:30,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-05T12:04:30,365 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=798, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=694, ProcessCount=16, AvailableMemoryMB=2598 2024-12-05T12:04:30,365 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-05T12:04:30,367 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:04:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:30,388 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:04:30,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-05T12:04:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T12:04:30,398 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:04:30,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741968_1144 (size=440) 2024-12-05T12:04:30,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741968_1144 (size=440) 2024-12-05T12:04:30,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741968_1144 (size=440) 2024-12-05T12:04:30,430 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 181d56aec8b6c6c419fc4e8e3161c451, NAME => 'testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:30,433 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3ca2163992a160fdd5ff463a6a484b7f, NAME => 'testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:30,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741969_1145 (size=65) 2024-12-05T12:04:30,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741969_1145 (size=65) 2024-12-05T12:04:30,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741969_1145 (size=65) 2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 3ca2163992a160fdd5ff463a6a484b7f, disabling compactions & flushes 2024-12-05T12:04:30,473 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. after waiting 0 ms 2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,473 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 
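For reference, the CREATE logged above describes a single MOB-enabled column family ('cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at rowkey '1'. This is not the code the test runs (the test drives the cluster through its own utilities); it is only an assumed sketch of how the same table could be created with the public Java Admin API, with all connection settings left at defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family matching the descriptor in the log:
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setColumnFamily(cf)
          .build();
      // Two regions, matching STARTKEY '' / '1' above: a single split key at '1'.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys);
    }
  }
}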
2024-12-05T12:04:30,473 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3ca2163992a160fdd5ff463a6a484b7f: Waiting for close lock at 1733400270473Disabling compacts and flushes for region at 1733400270473Disabling writes for close at 1733400270473Writing region close event to WAL at 1733400270473Closed at 1733400270473 2024-12-05T12:04:30,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741970_1146 (size=65) 2024-12-05T12:04:30,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741970_1146 (size=65) 2024-12-05T12:04:30,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741970_1146 (size=65) 2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 181d56aec8b6c6c419fc4e8e3161c451, disabling compactions & flushes 2024-12-05T12:04:30,482 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. after waiting 0 ms 2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,482 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 
2024-12-05T12:04:30,482 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 181d56aec8b6c6c419fc4e8e3161c451: Waiting for close lock at 1733400270482Disabling compacts and flushes for region at 1733400270482Disabling writes for close at 1733400270482Writing region close event to WAL at 1733400270482Closed at 1733400270482 2024-12-05T12:04:30,483 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:04:30,484 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400270483"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400270483"}]},"ts":"1733400270483"} 2024-12-05T12:04:30,484 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400270483"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400270483"}]},"ts":"1733400270483"} 2024-12-05T12:04:30,487 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:04:30,488 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:04:30,488 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400270488"}]},"ts":"1733400270488"} 2024-12-05T12:04:30,490 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-05T12:04:30,490 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:04:30,497 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:04:30,497 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:04:30,497 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:04:30,497 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:04:30,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, ASSIGN}] 2024-12-05T12:04:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T12:04:30,500 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, ASSIGN 2024-12-05T12:04:30,501 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, ASSIGN 2024-12-05T12:04:30,501 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, ASSIGN; state=OFFLINE, location=80666a11a09f,37629,1733400130958; forceNewPlan=false, retain=false 2024-12-05T12:04:30,502 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:04:30,652 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T12:04:30,652 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=181d56aec8b6c6c419fc4e8e3161c451, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:30,652 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=3ca2163992a160fdd5ff463a6a484b7f, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:30,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, ASSIGN because future has completed 2024-12-05T12:04:30,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:30,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, ASSIGN because future has completed 2024-12-05T12:04:30,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:04:30,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T12:04:30,810 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,810 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 181d56aec8b6c6c419fc4e8e3161c451, NAME => 'testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:04:30,811 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. service=AccessControlService 2024-12-05T12:04:30,811 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:04:30,811 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,811 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:30,811 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,811 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,813 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,813 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 3ca2163992a160fdd5ff463a6a484b7f, NAME => 'testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:04:30,813 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. service=AccessControlService 2024-12-05T12:04:30,814 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
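Both region opens above register the AccessControlService coprocessor endpoint, which is what later lets per-table ACLs be read and written. The test cluster's actual configuration is not visible in this log; as an assumed illustration only, the AccessController is usually wired in through the standard coprocessor configuration keys, for example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Load the AccessController on the master, regionservers and regions so the
    // AccessControlService endpoint seen in the log is available on every region.
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    // Turn on authorization checks.
    conf.setBoolean("hbase.security.authorization", true);
    System.out.println("Region coprocessors: " + conf.get("hbase.coprocessor.region.classes"));
  }
}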
2024-12-05T12:04:30,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:30,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,819 INFO [StoreOpener-3ca2163992a160fdd5ff463a6a484b7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,819 INFO [StoreOpener-181d56aec8b6c6c419fc4e8e3161c451-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,820 INFO [StoreOpener-181d56aec8b6c6c419fc4e8e3161c451-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 181d56aec8b6c6c419fc4e8e3161c451 columnFamilyName cf 2024-12-05T12:04:30,820 INFO [StoreOpener-3ca2163992a160fdd5ff463a6a484b7f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3ca2163992a160fdd5ff463a6a484b7f columnFamilyName cf 2024-12-05T12:04:30,822 DEBUG [StoreOpener-3ca2163992a160fdd5ff463a6a484b7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:30,822 DEBUG [StoreOpener-181d56aec8b6c6c419fc4e8e3161c451-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:30,822 INFO [StoreOpener-181d56aec8b6c6c419fc4e8e3161c451-1 {}] regionserver.HStore(327): Store=181d56aec8b6c6c419fc4e8e3161c451/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:30,822 INFO [StoreOpener-3ca2163992a160fdd5ff463a6a484b7f-1 {}] regionserver.HStore(327): Store=3ca2163992a160fdd5ff463a6a484b7f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:30,823 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,823 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,823 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,823 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,826 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,826 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,838 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:30,838 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:30,839 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 3ca2163992a160fdd5ff463a6a484b7f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75231652, jitterRate=0.12103897333145142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:30,839 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 181d56aec8b6c6c419fc4e8e3161c451; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64046260, jitterRate=-0.04563635587692261}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:30,839 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:30,839 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:30,840 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 3ca2163992a160fdd5ff463a6a484b7f: Running coprocessor pre-open hook at 1733400270814Writing region info on filesystem at 1733400270814Initializing all the Stores at 1733400270816 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400270816Cleaning up temporary data from old regions at 1733400270824 (+8 ms)Running coprocessor post-open hooks at 1733400270839 (+15 ms)Region opened successfully at 1733400270840 (+1 ms) 2024-12-05T12:04:30,840 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 181d56aec8b6c6c419fc4e8e3161c451: Running coprocessor pre-open hook at 1733400270812Writing region info on filesystem 
at 1733400270812Initializing all the Stores at 1733400270812Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400270812Cleaning up temporary data from old regions at 1733400270824 (+12 ms)Running coprocessor post-open hooks at 1733400270839 (+15 ms)Region opened successfully at 1733400270840 (+1 ms) 2024-12-05T12:04:30,841 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f., pid=67, masterSystemTime=1733400270809 2024-12-05T12:04:30,841 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451., pid=66, masterSystemTime=1733400270806 2024-12-05T12:04:30,843 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,843 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:30,843 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=3ca2163992a160fdd5ff463a6a484b7f, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:30,844 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,844 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:30,846 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=181d56aec8b6c6c419fc4e8e3161c451, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:30,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:30,847 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=80666a11a09f,42305,1733400131012, table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
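With both regions now OPEN and their locations written back to hbase:meta, a client can observe the resulting placement through the RegionLocator API. A minimal sketch, assuming default client configuration rather than anything taken from the test code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Should report the two regions assigned above (start keys '' and '1').
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName()
            + " [" + loc.getRegion().getRegionNameAsString() + "] on "
            + loc.getServerName());
      }
    }
  }
}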
2024-12-05T12:04:30,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:30,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-05T12:04:30,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012 in 194 msec 2024-12-05T12:04:30,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, ASSIGN in 355 msec 2024-12-05T12:04:30,853 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-05T12:04:30,853 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958 in 196 msec 2024-12-05T12:04:30,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-05T12:04:30,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, ASSIGN in 356 msec 2024-12-05T12:04:30,856 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:04:30,856 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400270856"}]},"ts":"1733400270856"} 2024-12-05T12:04:30,858 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-05T12:04:30,860 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:04:30,860 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-05T12:04:30,863 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T12:04:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,901 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,901 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:30,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:30,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:30,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:30,910 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:30,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 542 msec 2024-12-05T12:04:31,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-05T12:04:31,018 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T12:04:31,019 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-05T12:04:31,021 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 
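The CREATE_TABLE_POST_OPERATION step a few entries back had the AccessController persist the owner ACL "jenkins: RWXCA" into hbase:acl and push it to every watcher through the /hbase/acl znode. That owner entry is written automatically; purely for illustration, an equivalent explicit grant issued from a client with the standard AccessControlClient API would look roughly like this (names copied from the log, everything else assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Null family/qualifier means the whole table; the action set mirrors
      // the "RWXCA" entry recorded in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithResetTtl"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE,
          Permission.Action.ADMIN);
    }
  }
}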
2024-12-05T12:04:31,022 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:31,024 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,029 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,037 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,040 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T12:04:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400271040 (current time:1733400271040). 2024-12-05T12:04:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T12:04:31,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:31,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c04bb16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:31,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:31,042 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:31,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:31,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:31,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8506a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:31,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:31,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,044 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:31,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36b9440, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:31,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:31,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,046 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55422, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,048 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:04:31,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:31,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,048 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:31,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ebf27f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:31,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:31,050 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:31,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:31,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:31,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55d46d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:31,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:31,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,052 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33874, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:31,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@431a163d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:31,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:31,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:31,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,060 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:04:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,061 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T12:04:31,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
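At this point the master has validated the snapshot request, including reading the table ACL back so it can be embedded in the snapshot description, and is about to register the SnapshotProcedure for the FLUSH snapshot 'emptySnaptb0-testExportWithResetTtl'. From the client side the whole exchange is a single synchronous Admin call; a minimal sketch, with only the snapshot and table names taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above;
      // the call blocks until the SnapshotProcedure on the master completes.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}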
2024-12-05T12:04:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T12:04:31,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-05T12:04:31,063 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:31,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T12:04:31,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:31,068 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:31,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741971_1147 (size=161) 2024-12-05T12:04:31,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741971_1147 (size=161) 2024-12-05T12:04:31,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741971_1147 (size=161) 2024-12-05T12:04:31,076 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:04:31,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f}] 2024-12-05T12:04:31,078 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,078 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,092 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-05T12:04:31,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T12:04:31,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-05T12:04:31,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 3ca2163992a160fdd5ff463a6a484b7f: 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 181d56aec8b6c6c419fc4e8e3161c451: 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:31,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:31,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741973_1149 (size=68) 2024-12-05T12:04:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741973_1149 (size=68) 2024-12-05T12:04:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741973_1149 (size=68) 2024-12-05T12:04:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741972_1148 (size=68) 2024-12-05T12:04:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741972_1148 (size=68) 2024-12-05T12:04:31,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741972_1148 (size=68) 2024-12-05T12:04:31,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-05T12:04:31,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 
2024-12-05T12:04:31,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-05T12:04:31,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-05T12:04:31,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-05T12:04:31,238 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,238 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,238 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f in 162 msec 2024-12-05T12:04:31,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-05T12:04:31,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 in 162 msec 2024-12-05T12:04:31,240 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:31,241 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:31,242 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:31,242 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:31,242 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:31,242 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:04:31,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741974_1150 (size=60) 2024-12-05T12:04:31,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741974_1150 (size=60) 2024-12-05T12:04:31,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741974_1150 (size=60) 2024-12-05T12:04:31,254 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:31,254 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:31,255 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741975_1151 (size=641) 2024-12-05T12:04:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741975_1151 (size=641) 2024-12-05T12:04:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741975_1151 (size=641) 2024-12-05T12:04:31,273 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:31,278 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:31,279 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:31,281 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:31,281 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-05T12:04:31,282 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 219 msec 2024-12-05T12:04:31,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-05T12:04:31,378 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T12:04:31,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:31,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:31,386 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,389 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-05T12:04:31,389 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 
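The two HRegion(8528) records above warn that the test client is writing to both regions of testtb-testExportWithResetTtl with the WAL disabled. For orientation only, a minimal sketch of the kind of client-side write that produces that warning is shown below, assuming the standard HBase client API; the table name and the cf:q column come from the log, while the row key, value, and connection setup are illustrative and not taken from the test code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // illustrative row key, not from the log
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the "WAL disabled. Data may be lost" warning above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }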
2024-12-05T12:04:31,389 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:31,391 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,395 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:31,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T12:04:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400271404 (current time:1733400271404). 2024-12-05T12:04:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T12:04:31,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6588fc32, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:31,406 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7828195b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:31,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,407 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:31,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c77b14f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:31,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:31,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:04:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,411 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f3bc01c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:31,412 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c0a8f1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:31,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,414 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33904, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:31,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27953700, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:31,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:31,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:31,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,432 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55446, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:31,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:31,435 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35972, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:31,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:04:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:31,436 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T12:04:31,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
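The preceding records show the master validating the snaptb0-testExportWithResetTtl request (defaulting creation time and TTL, recording jenkins as owner, reading the table ACL entry) before attempting the snapshot. A minimal sketch of the client call that typically initiates such a FLUSH-type snapshot is shown below, assuming the public Admin API; only the snapshot and table names are taken from the log, the connection boilerplate is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: online regions are flushed before file references are recorded,
          // matching the "type=FLUSH ttl=0" snapshot description logged by the master.
          admin.snapshot("snaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"),
              SnapshotType.FLUSH);
        }
      }
    }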
2024-12-05T12:04:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-05T12:04:31,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-05T12:04:31,439 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:31,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T12:04:31,440 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:31,443 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:31,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741976_1152 (size=156) 2024-12-05T12:04:31,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741976_1152 (size=156) 2024-12-05T12:04:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741976_1152 (size=156) 2024-12-05T12:04:31,450 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:04:31,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f}] 2024-12-05T12:04:31,451 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,451 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T12:04:31,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-05T12:04:31,603 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-05T12:04:31,603 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:31,603 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 3ca2163992a160fdd5ff463a6a484b7f 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-05T12:04:31,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:31,604 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 181d56aec8b6c6c419fc4e8e3161c451 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-05T12:04:31,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f is 71, key is 151cb806d941022b78ef116c6ab625d1/cf:q/1733400271384/Put/seqid=0 2024-12-05T12:04:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741977_1153 (size=7962) 2024-12-05T12:04:31,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 is 71, key is 04de41109e417f2afee185403f81e4cc/cf:q/1733400271383/Put/seqid=0 2024-12-05T12:04:31,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741977_1153 (size=7962) 2024-12-05T12:04:31,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741977_1153 (size=7962) 2024-12-05T12:04:31,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:31,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741978_1154 (size=5312) 2024-12-05T12:04:31,650 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741978_1154 (size=5312) 2024-12-05T12:04:31,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741978_1154 (size=5312) 2024-12-05T12:04:31,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:31,652 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/.tmp/cf/67115f2d25c145c8969cff5182eaeb81, store: [table=testtb-testExportWithResetTtl family=cf region=3ca2163992a160fdd5ff463a6a484b7f] 2024-12-05T12:04:31,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/.tmp/cf/67115f2d25c145c8969cff5182eaeb81 is 206, key is 14b562f4ac38704a7fba1608192756006/cf:q/1733400271384/Put/seqid=0 2024-12-05T12:04:31,658 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/.tmp/cf/4b9330b1548b484d94a60918aa464f41, store: [table=testtb-testExportWithResetTtl family=cf region=181d56aec8b6c6c419fc4e8e3161c451] 2024-12-05T12:04:31,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/.tmp/cf/4b9330b1548b484d94a60918aa464f41 is 206, key is 03cc3be68b75efe0b02b936e5a1397a76/cf:q/1733400271383/Put/seqid=0 2024-12-05T12:04:31,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741979_1155 (size=14249) 2024-12-05T12:04:31,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741979_1155 (size=14249) 2024-12-05T12:04:31,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741979_1155 (size=14249) 2024-12-05T12:04:31,665 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/.tmp/cf/67115f2d25c145c8969cff5182eaeb81 2024-12-05T12:04:31,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741980_1156 (size=6512) 2024-12-05T12:04:31,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741980_1156 (size=6512) 2024-12-05T12:04:31,682 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/.tmp/cf/4b9330b1548b484d94a60918aa464f41 2024-12-05T12:04:31,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/.tmp/cf/67115f2d25c145c8969cff5182eaeb81 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81 2024-12-05T12:04:31,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741980_1156 (size=6512) 2024-12-05T12:04:31,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81, entries=44, sequenceid=6, filesize=13.9 K 2024-12-05T12:04:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/.tmp/cf/4b9330b1548b484d94a60918aa464f41 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41 2024-12-05T12:04:31,697 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 3ca2163992a160fdd5ff463a6a484b7f in 93ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 3ca2163992a160fdd5ff463a6a484b7f: 2024-12-05T12:04:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. for snaptb0-testExportWithResetTtl completed. 2024-12-05T12:04:31,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:31,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81] hfiles 2024-12-05T12:04:31,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81 for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,707 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41, entries=6, sequenceid=6, filesize=6.4 K 2024-12-05T12:04:31,708 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 181d56aec8b6c6c419fc4e8e3161c451 in 104ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:31,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 181d56aec8b6c6c419fc4e8e3161c451: 2024-12-05T12:04:31,708 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. for snaptb0-testExportWithResetTtl completed. 2024-12-05T12:04:31,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:31,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41] hfiles 2024-12-05T12:04:31,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41 for snapshot=snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741981_1157 (size=107) 2024-12-05T12:04:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741981_1157 (size=107) 2024-12-05T12:04:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741981_1157 (size=107) 2024-12-05T12:04:31,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 
2024-12-05T12:04:31,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-05T12:04:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-05T12:04:31,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,722 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f in 276 msec 2024-12-05T12:04:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741982_1158 (size=107) 2024-12-05T12:04:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741982_1158 (size=107) 2024-12-05T12:04:31,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741982_1158 (size=107) 2024-12-05T12:04:31,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 
2024-12-05T12:04:31,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-05T12:04:31,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-05T12:04:31,737 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,738 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=71 2024-12-05T12:04:31,741 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:31,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451 in 288 msec 2024-12-05T12:04:31,742 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:31,743 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:31,743 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:31,743 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:31,746 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451] hfiles 2024-12-05T12:04:31,746 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:31,746 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:31,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T12:04:31,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741983_1159 (size=291) 2024-12-05T12:04:31,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741983_1159 (size=291) 2024-12-05T12:04:31,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741983_1159 (size=291) 2024-12-05T12:04:31,780 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:31,780 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,780 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741984_1160 (size=951) 2024-12-05T12:04:31,802 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741984_1160 (size=951) 2024-12-05T12:04:31,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741984_1160 (size=951) 2024-12-05T12:04:31,805 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:31,813 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:31,814 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-05T12:04:31,815 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:31,816 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-05T12:04:31,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 379 msec 2024-12-05T12:04:32,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-05T12:04:32,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T12:04:32,069 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:04:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:32,071 
INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:04:32,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-05T12:04:32,072 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:04:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T12:04:32,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741985_1161 (size=433) 2024-12-05T12:04:32,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741985_1161 (size=433) 2024-12-05T12:04:32,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741985_1161 (size=433) 2024-12-05T12:04:32,082 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a39a77147c9cacfb540811cfb8825dbb, NAME => 'testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:32,082 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2b8ebcd03601de43e74d160c9632e879, NAME => 'testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:32,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741987_1163 (size=58) 2024-12-05T12:04:32,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741987_1163 (size=58) 2024-12-05T12:04:32,095 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741987_1163 (size=58) 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 2b8ebcd03601de43e74d160c9632e879, disabling compactions & flushes 2024-12-05T12:04:32,096 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. after waiting 0 ms 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,096 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,096 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2b8ebcd03601de43e74d160c9632e879: Waiting for close lock at 1733400272096Disabling compacts and flushes for region at 1733400272096Disabling writes for close at 1733400272096Writing region close event to WAL at 1733400272096Closed at 1733400272096 2024-12-05T12:04:32,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741986_1162 (size=58) 2024-12-05T12:04:32,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741986_1162 (size=58) 2024-12-05T12:04:32,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741986_1162 (size=58) 2024-12-05T12:04:32,103 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:32,104 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing a39a77147c9cacfb540811cfb8825dbb, disabling compactions & flushes 2024-12-05T12:04:32,104 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,104 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 
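The table and column-family attributes echoed in the create request and region-init entries above (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', REGION_REPLICATION => '1') correspond to a MOB-enabled family named 'cf'. A minimal sketch of how such a descriptor can be built with the standard HBase client builder API; the class and method layout here are illustrative and not taken from the test code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableDescriptorSketch {
      public static TableDescriptor build() {
        // Column family 'cf' with the attributes echoed in the log.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every flushed cell goes to a MOB file
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();
        // REGION_REPLICATION => '1' is the default; set it explicitly for clarity.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setRegionReplication(1)
            .setColumnFamily(cf)
            .build();
      }
    }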
2024-12-05T12:04:32,104 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. after waiting 0 ms 2024-12-05T12:04:32,104 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,104 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,104 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for a39a77147c9cacfb540811cfb8825dbb: Waiting for close lock at 1733400272104Disabling compacts and flushes for region at 1733400272104Disabling writes for close at 1733400272104Writing region close event to WAL at 1733400272104Closed at 1733400272104 2024-12-05T12:04:32,106 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:04:32,106 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733400272106"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400272106"}]},"ts":"1733400272106"} 2024-12-05T12:04:32,106 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733400272106"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400272106"}]},"ts":"1733400272106"} 2024-12-05T12:04:32,109 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
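The two meta rows written above, one region spanning [ '', '1' ) and one spanning [ '1', '' ), are what a pre-split table create produces. A hedged sketch of the kind of client call that yields this layout, reusing the descriptor builder from the previous sketch (illustrative only, not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A single split key "1" gives exactly the two regions recorded in hbase:meta.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(MobTableDescriptorSketch.build(), splitKeys);
        }
      }
    }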
2024-12-05T12:04:32,110 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:04:32,110 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400272110"}]},"ts":"1733400272110"} 2024-12-05T12:04:32,113 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-05T12:04:32,113 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:04:32,115 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:04:32,115 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:04:32,115 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:04:32,115 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:04:32,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, ASSIGN}] 2024-12-05T12:04:32,117 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, ASSIGN 2024-12-05T12:04:32,118 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, ASSIGN 2024-12-05T12:04:32,118 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, ASSIGN; state=OFFLINE, location=80666a11a09f,37629,1733400130958; forceNewPlan=false, retain=false 2024-12-05T12:04:32,118 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:04:32,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T12:04:32,269 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:04:32,269 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=a39a77147c9cacfb540811cfb8825dbb, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:32,269 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=2b8ebcd03601de43e74d160c9632e879, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:32,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, ASSIGN because future has completed 2024-12-05T12:04:32,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:04:32,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, ASSIGN because future has completed 2024-12-05T12:04:32,273 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T12:04:32,427 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,427 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 
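At this point the master has dispatched one OpenRegionProcedure per region; a client only sees the result once both regions reach OPEN. A small sketch of how a caller might wait for that and then inspect the assignments, assuming the standard Admin and RegionLocator APIs (polling interval and printing are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WaitForAssignmentSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Poll until the TransitRegionStateProcedures above have moved both regions to OPEN.
          while (!admin.isTableAvailable(table)) {
            Thread.sleep(100);
          }
          // Each location should now name the region server chosen by the balancer.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
          }
        }
      }
    }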
2024-12-05T12:04:32,427 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => a39a77147c9cacfb540811cfb8825dbb, NAME => 'testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:04:32,427 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 2b8ebcd03601de43e74d160c9632e879, NAME => 'testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:04:32,428 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. service=AccessControlService 2024-12-05T12:04:32,428 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. service=AccessControlService 2024-12-05T12:04:32,428 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:04:32,428 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:04:32,428 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,428 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,428 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:32,429 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:32,429 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,429 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,429 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for a39a77147c9cacfb540811cfb8825dbb 
2024-12-05T12:04:32,429 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,430 INFO [StoreOpener-a39a77147c9cacfb540811cfb8825dbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,430 INFO [StoreOpener-2b8ebcd03601de43e74d160c9632e879-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,432 INFO [StoreOpener-a39a77147c9cacfb540811cfb8825dbb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a39a77147c9cacfb540811cfb8825dbb columnFamilyName cf 2024-12-05T12:04:32,432 INFO [StoreOpener-2b8ebcd03601de43e74d160c9632e879-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2b8ebcd03601de43e74d160c9632e879 columnFamilyName cf 2024-12-05T12:04:32,433 DEBUG [StoreOpener-2b8ebcd03601de43e74d160c9632e879-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:32,433 DEBUG [StoreOpener-a39a77147c9cacfb540811cfb8825dbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:32,433 INFO [StoreOpener-a39a77147c9cacfb540811cfb8825dbb-1 {}] regionserver.HStore(327): Store=a39a77147c9cacfb540811cfb8825dbb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:32,433 INFO [StoreOpener-2b8ebcd03601de43e74d160c9632e879-1 {}] regionserver.HStore(327): Store=2b8ebcd03601de43e74d160c9632e879/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-05T12:04:32,434 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,434 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,435 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,435 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,435 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,436 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,436 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,437 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,437 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,437 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,439 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,439 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,441 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:32,441 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:32,442 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened a39a77147c9cacfb540811cfb8825dbb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69232095, jitterRate=0.03163860738277435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:32,442 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,442 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 2b8ebcd03601de43e74d160c9632e879; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69849965, jitterRate=0.040845587849617004}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:32,442 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,443 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 2b8ebcd03601de43e74d160c9632e879: Running coprocessor pre-open hook at 1733400272429Writing region info on filesystem at 1733400272429Initializing all the Stores at 1733400272430 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400272430Cleaning up temporary data from old regions at 1733400272437 (+7 ms)Running coprocessor post-open hooks at 1733400272442 (+5 ms)Region opened successfully at 1733400272443 (+1 ms) 2024-12-05T12:04:32,443 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for a39a77147c9cacfb540811cfb8825dbb: Running coprocessor pre-open hook at 1733400272429Writing region info on filesystem at 1733400272429Initializing all the Stores at 1733400272430 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400272430Cleaning up temporary data from old regions at 1733400272437 (+7 ms)Running coprocessor post-open hooks at 1733400272442 (+5 ms)Region opened successfully at 1733400272443 (+1 ms) 2024-12-05T12:04:32,444 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879., pid=78, masterSystemTime=1733400272424 2024-12-05T12:04:32,444 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb., pid=77, masterSystemTime=1733400272423 2024-12-05T12:04:32,446 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,446 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,446 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,446 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=2b8ebcd03601de43e74d160c9632e879, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:32,446 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,447 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=a39a77147c9cacfb540811cfb8825dbb, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:32,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:32,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:04:32,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-05T12:04:32,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958 in 177 msec 2024-12-05T12:04:32,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-05T12:04:32,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845 in 180 msec 2024-12-05T12:04:32,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, ASSIGN in 335 msec 2024-12-05T12:04:32,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=75, resume processing ppid=74 2024-12-05T12:04:32,454 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, ASSIGN in 336 msec 2024-12-05T12:04:32,454 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:04:32,454 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400272454"}]},"ts":"1733400272454"} 2024-12-05T12:04:32,456 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-05T12:04:32,457 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:04:32,457 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-05T12:04:32,460 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T12:04:32,500 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:32,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:32,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:32,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:32,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,534 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,535 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:32,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 465 msec 2024-12-05T12:04:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-05T12:04:32,698 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-05T12:04:32,698 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,701 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-05T12:04:32,701 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:32,704 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,709 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,715 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. with WAL disabled. Data may be lost in the event of a crash. 
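The "writing data to region ... with WAL disabled" warning above is what the region server prints when a client submits mutations with durability SKIP_WAL, a common choice in tests that load fixture rows quickly. A minimal sketch of such a write; the row key and value here are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0001"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
              .setDurability(Durability.SKIP_WAL); // triggers the "WAL disabled" warning above
          table.put(put);
        }
      }
    }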
2024-12-05T12:04:32,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37629 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:32,727 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,730 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-05T12:04:32,730 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:32,730 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:32,731 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,736 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,743 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-05T12:04:32,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-05T12:04:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400272746 (current time:1733400272746). 
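The snapshot request logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) is the master-side view of an Admin snapshot call; FLUSH is the snapshot type HBase uses by default for enabled tables. A hedged sketch of the basic call (the ttl=100000 seen in the log would be supplied through the snapshot's properties, which this sketch deliberately omits):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure finishes (or throws on failure).
          admin.snapshot("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }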
2024-12-05T12:04:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-05T12:04:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32b71910, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:32,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:32,748 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:32,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:32,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:32,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cc0322f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:32,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:32,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,749 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:32,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35dbc6b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:32,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:32,752 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:32,753 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55450, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:32,754 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:04:32,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:32,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,755 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:04:32,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@757c46b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:32,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:32,757 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:32,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:32,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:32,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c34fb4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:32,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:32,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,759 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:32,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56fce725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:32,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:32,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:32,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:32,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
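The repeated ClusterIdFetcher, ConnectionRegistryRpcStubHolder and "Stopping rpc client" entries above come from short-lived connections the master opens while validating the snapshot's ACLs (the accompanying call stacks show SnapshotDescriptionUtils closing them). Every new client Connection performs this registry handshake and then a hbase:meta location fetch before its first data access; a small sketch of the client-side pattern that produces the same sequence, with the Get contents purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ShortLivedConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opening the connection triggers the ConnectionRegistryService handshake
        // (cluster id fetch) seen in the log; the first data access then fetches the
        // hbase:meta location before the actual request is sent.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table acl = conn.getTable(TableName.valueOf("hbase:acl"))) {
          acl.get(new Get(Bytes.toBytes("testExportWithResetTtl")));
        } // close() produces the "Connection has been closed by ..." / "Call stack:" lines above
      }
    }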
2024-12-05T12:04:32,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:32,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:32,766 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:32,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:04:32,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:32,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:32,768 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-05T12:04:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:04:32,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-05T12:04:32,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-05T12:04:32,770 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T12:04:32,771 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:32,773 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:32,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741988_1164 (size=143) 2024-12-05T12:04:32,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741988_1164 (size=143) 2024-12-05T12:04:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741988_1164 (size=143) 2024-12-05T12:04:32,782 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-05T12:04:32,783 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a39a77147c9cacfb540811cfb8825dbb}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2b8ebcd03601de43e74d160c9632e879}] 2024-12-05T12:04:32,784 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:32,784 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T12:04:32,929 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000005/launch_container.sh] 2024-12-05T12:04:32,930 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000005/container_tokens] 2024-12-05T12:04:32,930 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000005/sysfs] 2024-12-05T12:04:32,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-05T12:04:32,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37629 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-05T12:04:32,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:32,936 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 2b8ebcd03601de43e74d160c9632e879 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T12:04:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 
2024-12-05T12:04:32,938 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing a39a77147c9cacfb540811cfb8825dbb 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T12:04:32,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb is 71, key is 0558f8d05540f6da91a784f5aab6675f/cf:q/1733400272724/Put/seqid=0 2024-12-05T12:04:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741989_1165 (size=5032) 2024-12-05T12:04:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741989_1165 (size=5032) 2024-12-05T12:04:32,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741989_1165 (size=5032) 2024-12-05T12:04:32,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:32,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:32,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 is 71, key is 12d071044e59fc1e9713e05040123f83/cf:q/1733400272725/Put/seqid=0 2024-12-05T12:04:32,986 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/.tmp/cf/71a1748c8ae147d4ab8a825ba6753445, store: [table=testExportWithResetTtl family=cf region=a39a77147c9cacfb540811cfb8825dbb] 2024-12-05T12:04:32,986 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/.tmp/cf/71a1748c8ae147d4ab8a825ba6753445 is 199, key is 03d8a7bfcb2f64caf89899424af9361f8/cf:q/1733400272724/Put/seqid=0 
2024-12-05T12:04:32,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741990_1166 (size=8242) 2024-12-05T12:04:32,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741990_1166 (size=8242) 2024-12-05T12:04:32,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741990_1166 (size=8242) 2024-12-05T12:04:32,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:33,003 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:33,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/.tmp/cf/7acd4144714440c0be555e733325ec9a, store: [table=testExportWithResetTtl family=cf region=2b8ebcd03601de43e74d160c9632e879] 2024-12-05T12:04:33,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/.tmp/cf/7acd4144714440c0be555e733325ec9a is 199, key is 1064dce6cfe4d6a829fedc693bcddaf99/cf:q/1733400272725/Put/seqid=0 2024-12-05T12:04:33,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741991_1167 (size=5679) 2024-12-05T12:04:33,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741991_1167 (size=5679) 2024-12-05T12:04:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741991_1167 (size=5679) 2024-12-05T12:04:33,007 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/.tmp/cf/71a1748c8ae147d4ab8a825ba6753445 2024-12-05T12:04:33,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741992_1168 (size=14714) 2024-12-05T12:04:33,014 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741992_1168 (size=14714) 2024-12-05T12:04:33,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741992_1168 (size=14714) 2024-12-05T12:04:33,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/.tmp/cf/71a1748c8ae147d4ab8a825ba6753445 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445 2024-12-05T12:04:33,020 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/.tmp/cf/7acd4144714440c0be555e733325ec9a 2024-12-05T12:04:33,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445, entries=2, sequenceid=5, filesize=5.5 K 2024-12-05T12:04:33,023 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for a39a77147c9cacfb540811cfb8825dbb in 85ms, sequenceid=5, compaction requested=false 2024-12-05T12:04:33,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for a39a77147c9cacfb540811cfb8825dbb: 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. for snaptb-testExportWithResetTtl completed. 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445] hfiles 2024-12-05T12:04:33,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445 for snapshot=snaptb-testExportWithResetTtl 2024-12-05T12:04:33,028 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/.tmp/cf/7acd4144714440c0be555e733325ec9a as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a 2024-12-05T12:04:33,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a, entries=48, sequenceid=5, filesize=14.4 K 2024-12-05T12:04:33,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 2b8ebcd03601de43e74d160c9632e879 in 99ms, sequenceid=5, compaction requested=false 2024-12-05T12:04:33,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 2b8ebcd03601de43e74d160c9632e879: 2024-12-05T12:04:33,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. for snaptb-testExportWithResetTtl completed. 2024-12-05T12:04:33,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-05T12:04:33,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:33,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a] hfiles 2024-12-05T12:04:33,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a for snapshot=snaptb-testExportWithResetTtl 2024-12-05T12:04:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741993_1169 (size=100) 2024-12-05T12:04:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741993_1169 (size=100) 2024-12-05T12:04:33,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741993_1169 (size=100) 2024-12-05T12:04:33,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 
2024-12-05T12:04:33,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-05T12:04:33,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-05T12:04:33,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:33,042 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:33,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a39a77147c9cacfb540811cfb8825dbb in 261 msec 2024-12-05T12:04:33,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741994_1170 (size=100) 2024-12-05T12:04:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741994_1170 (size=100) 2024-12-05T12:04:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741994_1170 (size=100) 2024-12-05T12:04:33,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 
2024-12-05T12:04:33,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-05T12:04:33,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-05T12:04:33,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:33,047 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:33,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-05T12:04:33,051 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:33,051 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2b8ebcd03601de43e74d160c9632e879 in 267 msec 2024-12-05T12:04:33,052 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:33,053 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:33,053 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:33,053 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:33,054 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb] hfiles 2024-12-05T12:04:33,054 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:33,054 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:33,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741995_1171 (size=284) 2024-12-05T12:04:33,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741995_1171 (size=284) 2024-12-05T12:04:33,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741995_1171 (size=284) 2024-12-05T12:04:33,078 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:33,078 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-05T12:04:33,079 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T12:04:33,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T12:04:33,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741996_1172 (size=923) 2024-12-05T12:04:33,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35565 is added to blk_1073741996_1172 (size=923) 2024-12-05T12:04:33,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741996_1172 (size=923) 2024-12-05T12:04:33,100 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:33,106 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:33,107 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T12:04:33,108 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:33,108 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-05T12:04:33,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 340 msec 2024-12-05T12:04:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-05T12:04:33,398 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-05T12:04:33,410 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410 2024-12-05T12:04:33,410 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:33,454 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:33,454 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T12:04:33,457 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:04:33,463 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-05T12:04:33,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741998_1174 (size=923) 2024-12-05T12:04:33,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741998_1174 (size=923) 2024-12-05T12:04:33,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741998_1174 (size=923) 2024-12-05T12:04:33,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741997_1173 (size=143) 2024-12-05T12:04:33,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741997_1173 (size=143) 2024-12-05T12:04:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741997_1173 (size=143) 2024-12-05T12:04:33,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741999_1175 (size=141) 2024-12-05T12:04:33,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741999_1175 (size=141) 2024-12-05T12:04:33,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741999_1175 (size=141) 2024-12-05T12:04:33,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:33,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:33,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1733400137807_0002_000001 (auth:SIMPLE) from 127.0.0.1:35824 2024-12-05T12:04:34,092 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000001/launch_container.sh] 2024-12-05T12:04:34,092 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000001/container_tokens] 2024-12-05T12:04:34,092 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0002/container_1733400137807_0002_01_000001/sysfs] 2024-12-05T12:04:34,535 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-15084059507791814372.jar 2024-12-05T12:04:34,536 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,536 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-3536929043448912412.jar 2024-12-05T12:04:34,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:34,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:04:34,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:04:34,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:04:34,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:04:34,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:04:34,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:04:34,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:04:34,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:04:34,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:04:34,603 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:04:34,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:04:34,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:34,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:34,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:34,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:34,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:34,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:34,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:34,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742000_1176 (size=131440) 2024-12-05T12:04:34,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742000_1176 (size=131440) 2024-12-05T12:04:34,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742000_1176 (size=131440) 2024-12-05T12:04:35,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742001_1177 (size=4188619) 2024-12-05T12:04:35,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37713 is added to blk_1073742001_1177 (size=4188619) 2024-12-05T12:04:35,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742001_1177 (size=4188619) 2024-12-05T12:04:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742002_1178 (size=1323991) 2024-12-05T12:04:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742002_1178 (size=1323991) 2024-12-05T12:04:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742002_1178 (size=1323991) 2024-12-05T12:04:35,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742003_1179 (size=903934) 2024-12-05T12:04:35,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742003_1179 (size=903934) 2024-12-05T12:04:35,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742003_1179 (size=903934) 2024-12-05T12:04:35,554 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:04:35,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742004_1180 (size=8360360) 2024-12-05T12:04:35,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742004_1180 (size=8360360) 2024-12-05T12:04:35,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742004_1180 (size=8360360) 2024-12-05T12:04:35,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742005_1181 (size=1877034) 2024-12-05T12:04:35,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742005_1181 (size=1877034) 2024-12-05T12:04:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742005_1181 (size=1877034) 2024-12-05T12:04:35,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742006_1182 (size=6425023) 2024-12-05T12:04:35,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742006_1182 (size=6425023) 2024-12-05T12:04:35,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742006_1182 (size=6425023) 2024-12-05T12:04:35,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742007_1183 (size=77835) 2024-12-05T12:04:35,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742007_1183 (size=77835) 2024-12-05T12:04:35,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742007_1183 (size=77835) 2024-12-05T12:04:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742008_1184 (size=443172) 2024-12-05T12:04:35,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742008_1184 (size=443172) 2024-12-05T12:04:35,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742008_1184 (size=443172) 2024-12-05T12:04:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742009_1185 (size=30949) 2024-12-05T12:04:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742009_1185 (size=30949) 2024-12-05T12:04:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742009_1185 (size=30949) 2024-12-05T12:04:35,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742010_1186 (size=1597213) 2024-12-05T12:04:35,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742010_1186 (size=1597213) 2024-12-05T12:04:35,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742010_1186 (size=1597213) 2024-12-05T12:04:35,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742011_1187 (size=4695811) 2024-12-05T12:04:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742011_1187 (size=4695811) 2024-12-05T12:04:35,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742011_1187 (size=4695811) 2024-12-05T12:04:35,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742012_1188 (size=232957) 2024-12-05T12:04:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742012_1188 (size=232957) 2024-12-05T12:04:35,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742012_1188 (size=232957) 2024-12-05T12:04:35,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742013_1189 (size=127628) 2024-12-05T12:04:35,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742013_1189 (size=127628) 2024-12-05T12:04:35,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742013_1189 (size=127628) 2024-12-05T12:04:35,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742014_1190 (size=20406) 2024-12-05T12:04:35,719 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742014_1190 (size=20406) 2024-12-05T12:04:35,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742014_1190 (size=20406) 2024-12-05T12:04:35,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742015_1191 (size=5175431) 2024-12-05T12:04:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742015_1191 (size=5175431) 2024-12-05T12:04:35,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742015_1191 (size=5175431) 2024-12-05T12:04:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742016_1192 (size=217634) 2024-12-05T12:04:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742016_1192 (size=217634) 2024-12-05T12:04:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742016_1192 (size=217634) 2024-12-05T12:04:36,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742017_1193 (size=1832290) 2024-12-05T12:04:36,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742017_1193 (size=1832290) 2024-12-05T12:04:36,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742017_1193 (size=1832290) 2024-12-05T12:04:36,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742018_1194 (size=322274) 2024-12-05T12:04:36,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742018_1194 (size=322274) 2024-12-05T12:04:36,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742018_1194 (size=322274) 2024-12-05T12:04:36,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742019_1195 (size=503880) 2024-12-05T12:04:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742019_1195 (size=503880) 2024-12-05T12:04:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742019_1195 (size=503880) 2024-12-05T12:04:36,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742020_1196 (size=29229) 2024-12-05T12:04:36,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742020_1196 (size=29229) 2024-12-05T12:04:36,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742020_1196 (size=29229) 2024-12-05T12:04:36,261 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742021_1197 (size=24096) 2024-12-05T12:04:36,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742021_1197 (size=24096) 2024-12-05T12:04:36,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742021_1197 (size=24096) 2024-12-05T12:04:36,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742022_1198 (size=111872) 2024-12-05T12:04:36,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742022_1198 (size=111872) 2024-12-05T12:04:36,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742022_1198 (size=111872) 2024-12-05T12:04:36,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742023_1199 (size=45609) 2024-12-05T12:04:36,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742023_1199 (size=45609) 2024-12-05T12:04:36,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742023_1199 (size=45609) 2024-12-05T12:04:36,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742024_1200 (size=136454) 2024-12-05T12:04:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742024_1200 (size=136454) 2024-12-05T12:04:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742024_1200 (size=136454) 2024-12-05T12:04:36,301 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-05T12:04:36,304 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-05T12:04:36,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-05T12:04:36,308 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-05T12:04:36,309 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.5 K 2024-12-05T12:04:36,309 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-05T12:04:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742025_1201 (size=995) 2024-12-05T12:04:36,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742025_1201 (size=995) 2024-12-05T12:04:36,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742025_1201 (size=995) 2024-12-05T12:04:36,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742026_1202 (size=35) 2024-12-05T12:04:36,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742026_1202 (size=35) 2024-12-05T12:04:36,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742026_1202 (size=35) 2024-12-05T12:04:36,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742027_1203 (size=303992) 2024-12-05T12:04:36,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742027_1203 (size=303992) 2024-12-05T12:04:36,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742027_1203 (size=303992) 2024-12-05T12:04:36,378 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:04:36,379 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:04:36,906 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:34850 2024-12-05T12:04:38,981 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
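
[Editor's note] By this point ExportSnapshot has loaded the hfile list for snaptb-testExportWithResetTtl, computed four copy splits (14.4 K, 8.0 K, 5.5 K and 4.9 K above), and submitted the copy job to the MiniMRCluster. A hedged sketch of driving the same tool programmatically is shown below; the class name and destination URI are placeholders rather than values from this run, while -snapshot and -copy-to are the tool's documented options.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder destination root; the test writes to a MiniDFSCluster export path instead.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://namenode:8020/hbase-export"
    });
    System.exit(exitCode);
  }
}
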
2024-12-05T12:04:40,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-05T12:04:40,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-05T12:04:40,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-05T12:04:40,362 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-05T12:04:42,515 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:50358 2024-12-05T12:04:43,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742028_1204 (size=349690) 2024-12-05T12:04:43,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742028_1204 (size=349690) 2024-12-05T12:04:43,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742028_1204 (size=349690) 2024-12-05T12:04:44,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:44964 2024-12-05T12:04:44,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:49688 2024-12-05T12:04:45,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:49694 2024-12-05T12:04:45,717 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:44970 2024-12-05T12:04:48,042 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:04:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742029_1205 (size=14714) 2024-12-05T12:04:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742029_1205 (size=14714) 2024-12-05T12:04:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742029_1205 (size=14714) 2024-12-05T12:04:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742031_1207 (size=8242) 2024-12-05T12:04:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742031_1207 (size=8242) 2024-12-05T12:04:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39603 is added to blk_1073742031_1207 (size=8242) 2024-12-05T12:04:51,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000003/launch_container.sh] 2024-12-05T12:04:51,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000003/container_tokens] 2024-12-05T12:04:51,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000003/sysfs] 2024-12-05T12:04:51,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742032_1208 (size=5679) 2024-12-05T12:04:51,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742032_1208 (size=5679) 2024-12-05T12:04:51,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742032_1208 (size=5679) 2024-12-05T12:04:51,802 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000004/launch_container.sh] 2024-12-05T12:04:51,802 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000004/container_tokens] 2024-12-05T12:04:51,802 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000004/sysfs] 2024-12-05T12:04:51,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742033_1209 (size=5032) 2024-12-05T12:04:51,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742033_1209 (size=5032) 2024-12-05T12:04:51,843 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742033_1209 (size=5032) 2024-12-05T12:04:51,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742030_1206 (size=31708) 2024-12-05T12:04:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742030_1206 (size=31708) 2024-12-05T12:04:51,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742030_1206 (size=31708) 2024-12-05T12:04:51,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742034_1210 (size=462) 2024-12-05T12:04:51,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742034_1210 (size=462) 2024-12-05T12:04:51,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742034_1210 (size=462) 2024-12-05T12:04:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742035_1211 (size=31708) 2024-12-05T12:04:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742035_1211 (size=31708) 2024-12-05T12:04:51,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742035_1211 (size=31708) 2024-12-05T12:04:51,997 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000005/launch_container.sh] 2024-12-05T12:04:51,997 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000005/container_tokens] 2024-12-05T12:04:51,997 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000005/sysfs] 2024-12-05T12:04:52,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742036_1212 (size=349690) 2024-12-05T12:04:52,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742036_1212 (size=349690) 2024-12-05T12:04:52,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742036_1212 (size=349690) 2024-12-05T12:04:52,017 INFO [Socket Reader #1 for port 
0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:33692 2024-12-05T12:04:52,024 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:46982 2024-12-05T12:04:53,540 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:04:53,541 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:04:53,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-05T12:04:53,548 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:04:53,549 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:04:53,549 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T12:04:53,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-05T12:04:53,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-05T12:04:53,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-05T12:04:53,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-05T12:04:53,550 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400273410/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-05T12:04:53,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-05T12:04:53,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T12:04:53,564 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): 
Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400293564"}]},"ts":"1733400293564"} 2024-12-05T12:04:53,566 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-05T12:04:53,566 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-05T12:04:53,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-05T12:04:53,568 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, UNASSIGN}] 2024-12-05T12:04:53,569 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, UNASSIGN 2024-12-05T12:04:53,569 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, UNASSIGN 2024-12-05T12:04:53,570 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=2b8ebcd03601de43e74d160c9632e879, regionState=CLOSING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:53,570 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=a39a77147c9cacfb540811cfb8825dbb, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:53,570 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=80666a11a09f,37629,1733400130958, table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-05T12:04:53,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, UNASSIGN because future has completed 2024-12-05T12:04:53,572 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:53,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:04:53,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, UNASSIGN because future has completed 2024-12-05T12:04:53,573 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:53,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:53,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T12:04:53,725 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:53,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:53,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing a39a77147c9cacfb540811cfb8825dbb, disabling compactions & flushes 2024-12-05T12:04:53,725 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:53,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 2024-12-05T12:04:53,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. after waiting 0 ms 2024-12-05T12:04:53,725 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:53,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 
2024-12-05T12:04:53,726 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:53,726 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 2b8ebcd03601de43e74d160c9632e879, disabling compactions & flushes 2024-12-05T12:04:53,726 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:53,726 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:53,726 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. after waiting 0 ms 2024-12-05T12:04:53,726 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:53,733 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:04:53,734 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:53,734 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:04:53,734 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb. 
2024-12-05T12:04:53,734 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for a39a77147c9cacfb540811cfb8825dbb: Waiting for close lock at 1733400293725Running coprocessor pre-close hooks at 1733400293725Disabling compacts and flushes for region at 1733400293725Disabling writes for close at 1733400293725Writing region close event to WAL at 1733400293726 (+1 ms)Running coprocessor post-close hooks at 1733400293734 (+8 ms)Closed at 1733400293734 2024-12-05T12:04:53,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:53,735 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879. 2024-12-05T12:04:53,735 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 2b8ebcd03601de43e74d160c9632e879: Waiting for close lock at 1733400293726Running coprocessor pre-close hooks at 1733400293726Disabling compacts and flushes for region at 1733400293726Disabling writes for close at 1733400293726Writing region close event to WAL at 1733400293727 (+1 ms)Running coprocessor post-close hooks at 1733400293735 (+8 ms)Closed at 1733400293735 2024-12-05T12:04:53,736 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:53,737 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=a39a77147c9cacfb540811cfb8825dbb, regionState=CLOSED 2024-12-05T12:04:53,737 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:53,738 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=2b8ebcd03601de43e74d160c9632e879, regionState=CLOSED 2024-12-05T12:04:53,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:04:53,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:53,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-05T12:04:53,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure a39a77147c9cacfb540811cfb8825dbb, server=80666a11a09f,42913,1733400130845 in 169 msec 2024-12-05T12:04:53,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-05T12:04:53,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 
2b8ebcd03601de43e74d160c9632e879, server=80666a11a09f,37629,1733400130958 in 169 msec 2024-12-05T12:04:53,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=a39a77147c9cacfb540811cfb8825dbb, UNASSIGN in 176 msec 2024-12-05T12:04:53,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-05T12:04:53,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=2b8ebcd03601de43e74d160c9632e879, UNASSIGN in 177 msec 2024-12-05T12:04:53,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-05T12:04:53,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 182 msec 2024-12-05T12:04:53,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400293759"}]},"ts":"1733400293759"} 2024-12-05T12:04:53,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-05T12:04:53,762 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-05T12:04:53,765 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 205 msec 2024-12-05T12:04:53,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-05T12:04:53,878 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-05T12:04:53,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-05T12:04:53,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,881 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-05T12:04:53,882 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,886 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-05T12:04:53,888 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:53,888 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:53,890 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/recovered.edits] 2024-12-05T12:04:53,890 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/recovered.edits] 2024-12-05T12:04:53,896 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/cf/71a1748c8ae147d4ab8a825ba6753445 2024-12-05T12:04:53,898 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/cf/7acd4144714440c0be555e733325ec9a 2024-12-05T12:04:53,901 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/recovered.edits/8.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb/recovered.edits/8.seqid 2024-12-05T12:04:53,902 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:53,902 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/recovered.edits/8.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879/recovered.edits/8.seqid 2024-12-05T12:04:53,902 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportWithResetTtl/2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:53,902 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-05T12:04:53,903 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-05T12:04:53,904 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-05T12:04:53,908 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120504432820bbbb4b41a97bd39e9ba80ba7_2b8ebcd03601de43e74d160c9632e879 2024-12-05T12:04:53,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241205a85c7a2f26c845609620259cb88392a6_a39a77147c9cacfb540811cfb8825dbb 2024-12-05T12:04:53,910 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-05T12:04:53,913 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,917 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-05T12:04:53,920 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-05T12:04:53,922 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,922 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
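The HFileArchiver entries above show that store files and mob files are moved under the archive directory rather than deleted outright. A small sketch of how one might list what ended up there; the archive path is copied from the log as an assumption for this run only, and in practice it would be derived from hbase.rootdir.

// Illustrative: recursively list the archived store files for the dropped table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Path taken from the log above; derive from hbase.rootdir in real code.
    Path archived = new Path("hdfs://localhost:37541/user/jenkins/test-data/"
        + "bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testExportWithResetTtl");
    FileSystem fs = archived.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}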
2024-12-05T12:04:53,923 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400293922"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:53,923 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400293922"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:53,925 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:04:53,925 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a39a77147c9cacfb540811cfb8825dbb, NAME => 'testExportWithResetTtl,,1733400272069.a39a77147c9cacfb540811cfb8825dbb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2b8ebcd03601de43e74d160c9632e879, NAME => 'testExportWithResetTtl,1,1733400272069.2b8ebcd03601de43e74d160c9632e879.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:04:53,926 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-05T12:04:53,926 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400293926"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:53,928 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-05T12:04:53,929 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-05T12:04:53,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 50 msec 2024-12-05T12:04:53,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,934 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,935 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T12:04:53,935 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T12:04:53,935 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T12:04:53,936 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-05T12:04:53,942 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:53,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,942 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:53,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:53,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-05T12:04:53,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:53,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-05T12:04:53,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:53,944 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-05T12:04:53,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:53,944 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-05T12:04:53,945 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:53,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-05T12:04:53,946 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:53,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:53,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T12:04:53,951 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400293951"}]},"ts":"1733400293951"} 2024-12-05T12:04:53,953 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-05T12:04:53,953 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-05T12:04:53,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-05T12:04:53,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, UNASSIGN}] 2024-12-05T12:04:53,958 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, UNASSIGN 2024-12-05T12:04:53,958 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, UNASSIGN 2024-12-05T12:04:53,959 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=181d56aec8b6c6c419fc4e8e3161c451, regionState=CLOSING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:04:53,959 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=3ca2163992a160fdd5ff463a6a484b7f, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:53,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, UNASSIGN because future has completed 2024-12-05T12:04:53,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, UNASSIGN because future has completed 2024-12-05T12:04:53,977 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=80666a11a09f,37629,1733400130958, table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-05T12:04:53,978 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:53,978 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:04:53,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:04:53,979 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:04:54,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T12:04:54,132 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:54,132 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:54,132 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 3ca2163992a160fdd5ff463a6a484b7f, disabling compactions & flushes 2024-12-05T12:04:54,132 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:54,132 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:54,132 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 
after waiting 0 ms 2024-12-05T12:04:54,132 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:54,134 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:54,134 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:04:54,134 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 181d56aec8b6c6c419fc4e8e3161c451, disabling compactions & flushes 2024-12-05T12:04:54,134 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:54,134 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:54,134 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. after waiting 0 ms 2024-12-05T12:04:54,135 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 2024-12-05T12:04:54,140 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:54,140 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:54,140 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451. 
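The same disable path now runs for testtb-testExportWithResetTtl (pid=89 and its child procedures), and the table is deleted a few entries further down. Before issuing such a delete, a client can confirm the table state reported by the master, as in this sketch; the table name is reused from the log, while the polling loop itself is illustrative.

// Illustrative: wait until the master reports the table as DISABLED.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForDisable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The same state transition is visible in hbase:meta as state=DISABLED.
      while (!admin.isTableDisabled(table)) {
        Thread.sleep(100);
      }
      System.out.println(table + " is disabled");
    }
  }
}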
2024-12-05T12:04:54,140 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 181d56aec8b6c6c419fc4e8e3161c451: Waiting for close lock at 1733400294134Running coprocessor pre-close hooks at 1733400294134Disabling compacts and flushes for region at 1733400294134Disabling writes for close at 1733400294135 (+1 ms)Writing region close event to WAL at 1733400294135Running coprocessor post-close hooks at 1733400294140 (+5 ms)Closed at 1733400294140 2024-12-05T12:04:54,142 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:04:54,142 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:54,143 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:04:54,143 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f. 2024-12-05T12:04:54,143 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=181d56aec8b6c6c419fc4e8e3161c451, regionState=CLOSED 2024-12-05T12:04:54,143 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 3ca2163992a160fdd5ff463a6a484b7f: Waiting for close lock at 1733400294132Running coprocessor pre-close hooks at 1733400294132Disabling compacts and flushes for region at 1733400294132Disabling writes for close at 1733400294132Writing region close event to WAL at 1733400294133 (+1 ms)Running coprocessor post-close hooks at 1733400294143 (+10 ms)Closed at 1733400294143 2024-12-05T12:04:54,145 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:54,146 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=3ca2163992a160fdd5ff463a6a484b7f, regionState=CLOSED 2024-12-05T12:04:54,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:04:54,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:54,150 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-05T12:04:54,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 
181d56aec8b6c6c419fc4e8e3161c451, server=80666a11a09f,37629,1733400130958 in 168 msec 2024-12-05T12:04:54,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-05T12:04:54,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=181d56aec8b6c6c419fc4e8e3161c451, UNASSIGN in 194 msec 2024-12-05T12:04:54,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 3ca2163992a160fdd5ff463a6a484b7f, server=80666a11a09f,42305,1733400131012 in 170 msec 2024-12-05T12:04:54,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-05T12:04:54,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=3ca2163992a160fdd5ff463a6a484b7f, UNASSIGN in 197 msec 2024-12-05T12:04:54,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-05T12:04:54,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 201 msec 2024-12-05T12:04:54,160 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400294160"}]},"ts":"1733400294160"} 2024-12-05T12:04:54,162 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-05T12:04:54,162 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-05T12:04:54,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 218 msec 2024-12-05T12:04:54,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-05T12:04:54,269 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T12:04:54,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-05T12:04:54,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,273 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-05T12:04:54,274 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,279 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-05T12:04:54,282 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:54,285 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/recovered.edits] 2024-12-05T12:04:54,287 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:54,289 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/recovered.edits] 2024-12-05T12:04:54,292 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/cf/4b9330b1548b484d94a60918aa464f41 2024-12-05T12:04:54,294 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/cf/67115f2d25c145c8969cff5182eaeb81 2024-12-05T12:04:54,298 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451/recovered.edits/9.seqid 2024-12-05T12:04:54,305 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:54,307 DEBUG [HFileArchiver-11 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f/recovered.edits/9.seqid 2024-12-05T12:04:54,308 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithResetTtl/3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:54,308 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-05T12:04:54,308 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-05T12:04:54,309 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-05T12:04:54,315 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024120585221d021d304b3fb6693e723adedc52_3ca2163992a160fdd5ff463a6a484b7f 2024-12-05T12:04:54,317 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202412054285b164b5b54185ba7eb48adf47fe4c_181d56aec8b6c6c419fc4e8e3161c451 2024-12-05T12:04:54,317 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-05T12:04:54,320 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,324 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-05T12:04:54,327 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
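The DeleteTableProcedure steps around this point archive each region's cf/ and recovered.edits/ directories and the table's mobdir entries under archive/data/default/... rather than deleting them outright, then remove the region rows and table descriptor from hbase:meta. A minimal sketch of the corresponding client calls follows; it assumes an open Admin handle obtained as in the earlier sketch, uses an illustrative class name (DropTableSketch), and mirrors the snapshot deletions that appear a little further down in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class DropTableSketch {
      static void dropExportTable(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
        // Runs DeleteTableProcedure: archive region and MOB directories, then clean hbase:meta.
        admin.deleteTable(tn);
        // The "Deleting snapshot: ..." lines further down correspond to calls like these.
        admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
      }
    }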
2024-12-05T12:04:54,330 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,330 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-05T12:04:54,330 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400294330"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:54,330 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400294330"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:54,333 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:04:54,333 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 181d56aec8b6c6c419fc4e8e3161c451, NAME => 'testtb-testExportWithResetTtl,,1733400270367.181d56aec8b6c6c419fc4e8e3161c451.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3ca2163992a160fdd5ff463a6a484b7f, NAME => 'testtb-testExportWithResetTtl,1,1733400270367.3ca2163992a160fdd5ff463a6a484b7f.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:04:54,334 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-05T12:04:54,334 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400294334"}]},"ts":"9223372036854775807"} 2024-12-05T12:04:54,336 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-05T12:04:54,337 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-05T12:04:54,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 67 msec 2024-12-05T12:04:54,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,367 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,368 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T12:04:54,368 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T12:04:54,368 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T12:04:54,369 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,376 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,376 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-05T12:04:54,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-05T12:04:54,378 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-05T12:04:54,378 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-05T12:04:54,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(838): 
Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-05T12:04:54,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-05T12:04:54,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-05T12:04:54,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-05T12:04:54,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-05T12:04:54,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-05T12:04:54,425 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=803 (was 798) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:41822 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3158 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39835 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46337 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40863 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:46337 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:40863 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:41044 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_546826306_1 at /127.0.0.1:41786 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 134366) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:49396 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=814 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=802 (was 694) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 16) - ProcessCount LEAK? -, AvailableMemoryMB=2101 (was 2598) 2024-12-05T12:04:54,425 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-05T12:04:54,446 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=803, OpenFileDescriptor=814, MaxFileDescriptor=1048576, SystemLoadAverage=802, ProcessCount=19, AvailableMemoryMB=2099 2024-12-05T12:04:54,447 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-05T12:04:54,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:04:54,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:04:54,451 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:04:54,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-05T12:04:54,453 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:04:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T12:04:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742037_1213 (size=443) 2024-12-05T12:04:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742037_1213 (size=443) 2024-12-05T12:04:54,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742037_1213 (size=443) 2024-12-05T12:04:54,471 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6ef92a5f61de09d8e11a7a0b018f01dc, NAME => 
'testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:54,471 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 41a242823f4e7d8b34700dd3c99d5f0e, NAME => 'testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742039_1215 (size=68) 2024-12-05T12:04:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742039_1215 (size=68) 2024-12-05T12:04:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742039_1215 (size=68) 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 6ef92a5f61de09d8e11a7a0b018f01dc, disabling compactions & flushes 2024-12-05T12:04:54,497 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 
after waiting 0 ms 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,497 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,497 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6ef92a5f61de09d8e11a7a0b018f01dc: Waiting for close lock at 1733400294497Disabling compacts and flushes for region at 1733400294497Disabling writes for close at 1733400294497Writing region close event to WAL at 1733400294497Closed at 1733400294497 2024-12-05T12:04:54,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742038_1214 (size=68) 2024-12-05T12:04:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742038_1214 (size=68) 2024-12-05T12:04:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742038_1214 (size=68) 2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 41a242823f4e7d8b34700dd3c99d5f0e, disabling compactions & flushes 2024-12-05T12:04:54,506 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. after waiting 0 ms 2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:54,506 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 
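The create request at 12:04:54,448 builds a two-region table whose single family 'cf' is MOB-enabled with MOB_THRESHOLD => '0', so every cell value is written to the MOB path (which is why mobdir entries appeared for the previous table). As a rough illustration of how such a descriptor is assembled with the 2.x client API, the following sketch (illustrative class name CreateMobTableSketch, assumed Admin handle) builds an equivalent table; it is not the test's literal setup code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      static void createExportTable(Admin admin) throws Exception {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value goes to a MOB file
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
        // A single split key '1' yields the two regions seen in the log: ('', '1') and ('1', '').
        byte[][] splits = new byte[][] { Bytes.toBytes("1") };
        admin.createTable(td, splits);
      }
    }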
2024-12-05T12:04:54,506 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 41a242823f4e7d8b34700dd3c99d5f0e: Waiting for close lock at 1733400294506Disabling compacts and flushes for region at 1733400294506Disabling writes for close at 1733400294506Writing region close event to WAL at 1733400294506Closed at 1733400294506 2024-12-05T12:04:54,507 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:04:54,508 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733400294507"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400294507"}]},"ts":"1733400294507"} 2024-12-05T12:04:54,508 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733400294507"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400294507"}]},"ts":"1733400294507"} 2024-12-05T12:04:54,510 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:04:54,511 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:04:54,511 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400294511"}]},"ts":"1733400294511"} 2024-12-05T12:04:54,513 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-05T12:04:54,513 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:04:54,514 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:04:54,515 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:04:54,515 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:04:54,515 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:04:54,515 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:04:54,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, ASSIGN}] 2024-12-05T12:04:54,517 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, ASSIGN 2024-12-05T12:04:54,517 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, ASSIGN 2024-12-05T12:04:54,518 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:04:54,518 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:04:54,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T12:04:54,668 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
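The repeated "Checking to see if procedure is done pid=96" lines are the client polling the master for the CreateTableProcedure result; a synchronous Admin call blocks on that poll, while the async admin (the RawAsyncHBaseAdmin seen elsewhere in this log) surfaces it as a future. A hedged sketch of the async flavor, with an assumed class name (AsyncCreateSketch) and a TableDescriptor built as in the previous sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class AsyncCreateSketch {
      static void createAndWait(TableDescriptor td, byte[][] splits) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          // The returned future completes once the master reports the procedure finished;
          // under the hood the client polls, which is what the repeated
          // "Checking to see if procedure is done pid=96" entries reflect.
          conn.getAdmin().createTable(td, splits).get();
        }
      }
    }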
2024-12-05T12:04:54,668 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6ef92a5f61de09d8e11a7a0b018f01dc, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:54,668 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=41a242823f4e7d8b34700dd3c99d5f0e, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:54,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, ASSIGN because future has completed 2024-12-05T12:04:54,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:04:54,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, ASSIGN because future has completed 2024-12-05T12:04:54,675 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:04:54,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T12:04:54,831 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,831 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 6ef92a5f61de09d8e11a7a0b018f01dc, NAME => 'testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:04:54,831 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. service=AccessControlService 2024-12-05T12:04:54,831 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:04:54,832 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,832 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:54,832 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,832 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,833 INFO [StoreOpener-6ef92a5f61de09d8e11a7a0b018f01dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,833 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:54,834 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 41a242823f4e7d8b34700dd3c99d5f0e, NAME => 'testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:04:54,834 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. service=AccessControlService 2024-12-05T12:04:54,834 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
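Each region open above registers the AccessControlService coprocessor endpoint because the cluster runs with the AccessController installed (the "Secure" in TestMobSecureExportSnapshot). The sketch below shows the configuration keys commonly set to produce those lines, expressed as programmatic overrides with an assumed class name (AccessControlConfSketch); the same keys would normally live in hbase-site.xml, and the exact bootstrap used by this test is not shown here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.access.AccessController;

    public class AccessControlConfSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        String ac = AccessController.class.getName();
        conf.set("hbase.security.authorization", "true");
        // Installing the coprocessor on master, region servers and regions is what produces the
        // "System coprocessor ... AccessController loaded" and "service=AccessControlService" lines.
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        return conf;
      }
    }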
2024-12-05T12:04:54,834 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,834 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:04:54,834 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,835 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,835 INFO [StoreOpener-6ef92a5f61de09d8e11a7a0b018f01dc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ef92a5f61de09d8e11a7a0b018f01dc columnFamilyName cf 2024-12-05T12:04:54,836 INFO [StoreOpener-41a242823f4e7d8b34700dd3c99d5f0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,836 DEBUG [StoreOpener-6ef92a5f61de09d8e11a7a0b018f01dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:54,837 INFO [StoreOpener-6ef92a5f61de09d8e11a7a0b018f01dc-1 {}] regionserver.HStore(327): Store=6ef92a5f61de09d8e11a7a0b018f01dc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:54,837 INFO [StoreOpener-41a242823f4e7d8b34700dd3c99d5f0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41a242823f4e7d8b34700dd3c99d5f0e columnFamilyName cf 2024-12-05T12:04:54,837 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,838 DEBUG [StoreOpener-41a242823f4e7d8b34700dd3c99d5f0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:54,838 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,838 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,838 INFO [StoreOpener-41a242823f4e7d8b34700dd3c99d5f0e-1 {}] regionserver.HStore(327): Store=41a242823f4e7d8b34700dd3c99d5f0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:04:54,838 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,839 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,839 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,839 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,840 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,840 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,840 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,841 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,841 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,843 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:54,844 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 6ef92a5f61de09d8e11a7a0b018f01dc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58948726, jitterRate=-0.12159553170204163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:54,844 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:54,845 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:04:54,845 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 6ef92a5f61de09d8e11a7a0b018f01dc: Running coprocessor pre-open hook at 1733400294832Writing region info on filesystem at 1733400294832Initializing all the Stores at 1733400294833 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400294833Cleaning up temporary data from old regions at 1733400294839 (+6 ms)Running coprocessor post-open hooks at 1733400294844 (+5 ms)Region opened successfully at 1733400294845 (+1 ms) 2024-12-05T12:04:54,846 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 41a242823f4e7d8b34700dd3c99d5f0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65164567, jitterRate=-0.028972283005714417}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:04:54,846 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:54,846 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 41a242823f4e7d8b34700dd3c99d5f0e: Running coprocessor pre-open hook at 1733400294835Writing region info on filesystem at 1733400294835Initializing all the Stores at 1733400294835Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400294835Cleaning up temporary data from old regions at 1733400294840 (+5 ms)Running coprocessor post-open hooks at 1733400294846 (+6 ms)Region opened successfully at 1733400294846 2024-12-05T12:04:54,847 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc., pid=99, masterSystemTime=1733400294827 2024-12-05T12:04:54,847 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e., pid=100, masterSystemTime=1733400294831 2024-12-05T12:04:54,849 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,849 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:54,850 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=6ef92a5f61de09d8e11a7a0b018f01dc, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:04:54,851 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:54,851 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 
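The two regions opened above (STARTKEY '' to ENDKEY '1', and STARTKEY '1' onward) with a single MOB-enabled family 'cf' correspond to a table pre-split at the key '1'. A minimal client-side sketch that would produce an equivalent layout, assuming the standard HBase 2.x Admin API; the actual test code is not shown in this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: create a table equivalent to the one being opened above, with one
    // 'cf' family (MOB enabled, threshold 0, single version) and a single split
    // point '1', yielding the two regions ['', '1') and ['1', '').
    public class CreateExportTestTable {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          admin.createTable(
              TableDescriptorBuilder.newBuilder(tn)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMobEnabled(true)   // IS_MOB => 'true' in the region-open journal
                      .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
                      .setMaxVersions(1)     // VERSIONS => '1'
                      .build())
                  .build(),
              new byte[][] { Bytes.toBytes("1") });  // one split key => two regions
        }
      }
    }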
2024-12-05T12:04:54,855 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=41a242823f4e7d8b34700dd3c99d5f0e, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:04:54,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:04:54,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:04:54,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-05T12:04:54,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845 in 186 msec 2024-12-05T12:04:54,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-05T12:04:54,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, ASSIGN in 345 msec 2024-12-05T12:04:54,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012 in 184 msec 2024-12-05T12:04:54,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-05T12:04:54,864 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, ASSIGN in 347 msec 2024-12-05T12:04:54,864 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:04:54,864 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400294864"}]},"ts":"1733400294864"} 2024-12-05T12:04:54,866 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-05T12:04:54,867 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:04:54,868 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-05T12:04:54,872 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
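The ACL entry written above, "jenkins: RWXCA", maps to the five table-level actions READ, WRITE, EXEC, CREATE and ADMIN. A hedged sketch of the equivalent grant through AccessControlClient, where a null family and qualifier mean the grant covers the whole table:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    // Sketch: the kind of grant that produces a "jenkins: RWXCA" row in hbase:acl.
    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemState"),
              "jenkins",        // grantee user
              null, null,       // family, qualifier: null => entire table
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }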
2024-12-05T12:04:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,881 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:04:54,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:54,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:54,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:54,890 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:04:54,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 441 msec 2024-12-05T12:04:54,945 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000002/launch_container.sh] 2024-12-05T12:04:54,945 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000002/container_tokens] 2024-12-05T12:04:54,945 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000002/sysfs] 2024-12-05T12:04:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-05T12:04:55,078 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T12:04:55,078 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,081 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-05T12:04:55,081 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:55,081 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:55,083 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,084 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-05T12:04:55,090 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:04:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400295101 (current time:1733400295101). 
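The snapshot request logged above, { ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, is what a flush-type snapshot call looks like from the master's side. A minimal sketch of the client call, assuming the standard HBase 2.x Admin interface; the synchronous snapshot() call waits for the SnapshotProcedure reported further down (pid=101) to finish:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Sketch: request a FLUSH-type snapshot of the test table.
    public class TakeFlushSnapshot {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }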
2024-12-05T12:04:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-05T12:04:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@313b5d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:55,103 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:55,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:55,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:55,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aa02af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:55,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:55,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,105 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:55,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@708445b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:55,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:55,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,108 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55438, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:55,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:04:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,110 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
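The call stack above ends in SnapshotDescriptionUtils.isSecurityAvailable, which opens a short-lived connection only to check whether the hbase:acl table exists before copying table permissions into the snapshot description. A rough client-side approximation of that check (not the master's actual code path):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: is security (the hbase:acl table) available on this cluster?
    public class AclTableCheck {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          boolean securityAvailable = admin.tableExists(TableName.valueOf("hbase:acl"));
          System.out.println("hbase:acl present: " + securityAvailable);
        }
      }
    }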
2024-12-05T12:04:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19a4f880, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:55,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:55,113 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:55,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:55,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:55,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@710123c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:55,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:55,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,115 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:55,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d25a35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:55,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:55,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55442, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:04:55,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:55,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,123 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:55,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:04:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:55,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T12:04:55,125 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:04:55,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:04:55,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:04:55,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-05T12:04:55,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T12:04:55,128 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:55,129 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:55,133 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:55,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742040_1216 (size=170) 2024-12-05T12:04:55,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742040_1216 (size=170) 2024-12-05T12:04:55,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742040_1216 (size=170) 2024-12-05T12:04:55,148 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-05T12:04:55,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc}] 2024-12-05T12:04:55,149 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,149 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T12:04:55,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-05T12:04:55,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:55,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-05T12:04:55,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:55,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 41a242823f4e7d8b34700dd3c99d5f0e: 2024-12-05T12:04:55,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. for emptySnaptb0-testExportFileSystemState completed. 2024-12-05T12:04:55,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 6ef92a5f61de09d8e11a7a0b018f01dc: 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. for emptySnaptb0-testExportFileSystemState completed. 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:04:55,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742041_1217 (size=71) 2024-12-05T12:04:55,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742041_1217 (size=71) 2024-12-05T12:04:55,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742041_1217 (size=71) 2024-12-05T12:04:55,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:55,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-05T12:04:55,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742042_1218 (size=71) 2024-12-05T12:04:55,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742042_1218 (size=71) 2024-12-05T12:04:55,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-05T12:04:55,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,316 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 
2024-12-05T12:04:55,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-05T12:04:55,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-05T12:04:55,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742042_1218 (size=71) 2024-12-05T12:04:55,318 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,319 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e in 169 msec 2024-12-05T12:04:55,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-05T12:04:55,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc in 172 msec 2024-12-05T12:04:55,321 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:55,322 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:55,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:55,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:55,323 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:55,324 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:04:55,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742043_1219 (size=63) 2024-12-05T12:04:55,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742043_1219 (size=63) 2024-12-05T12:04:55,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742043_1219 (size=63) 2024-12-05T12:04:55,333 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:55,333 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-05T12:04:55,333 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-05T12:04:55,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742044_1220 (size=653) 2024-12-05T12:04:55,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742044_1220 (size=653) 2024-12-05T12:04:55,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742044_1220 (size=653) 2024-12-05T12:04:55,345 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:55,351 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:55,352 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-05T12:04:55,356 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:55,356 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-05T12:04:55,358 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 231 msec 2024-12-05T12:04:55,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-05T12:04:55,449 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T12:04:55,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:55,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:04:55,462 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,465 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-05T12:04:55,465 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 
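The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted when a mutation skips the write-ahead log. A minimal sketch of such a write, with placeholder row, qualifier and value (the test's actual rows are not shown in this log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: a put that bypasses the WAL, which triggers the warning above.
    public class SkipWalPut {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                            // placeholder row
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);  // data is lost if the region server crashes
          table.put(put);
        }
      }
    }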
2024-12-05T12:04:55,465 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:04:55,467 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,473 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,481 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:04:55,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:04:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400295485 (current time:1733400295485). 2024-12-05T12:04:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:04:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-05T12:04:55,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:04:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5672c0e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:55,487 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:55,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:55,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:55,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@546ca9cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-05T12:04:55,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:55,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:55,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,488 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:55,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2360610, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:55,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:55,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55444, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:55,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:04:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@573aabbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:04:55,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:04:55,494 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:04:55,495 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:04:55,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:04:55,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:04:55,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee8b66c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:04:55,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:04:55,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,497 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47412, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:04:55,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5898ce1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:04:55,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:04:55,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:04:55,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:04:55,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:04:55,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:04:55,505 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48360, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-05T12:04:55,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 2024-12-05T12:04:55,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:04:55,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:04:55,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T12:04:55,507 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
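The call stack above shows the master reading the table ACL (writeAclToSnapshotDescription via PermissionStorage) so it can be embedded in the snapshot description, and the entry it reads is [jenkins: RWXCA]. A hedged sketch of the kind of grant that produces such an entry, using the user and table names from the log (the grant itself is not part of this log excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // null family/qualifier means the grant covers the whole table; the five
      // actions correspond to the RWXCA entry read above.
      AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}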
2024-12-05T12:04:55,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:04:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:04:55,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-05T12:04:55,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T12:04:55,511 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:04:55,512 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:04:55,514 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:04:55,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742045_1221 (size=165) 2024-12-05T12:04:55,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742045_1221 (size=165) 2024-12-05T12:04:55,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742045_1221 (size=165) 2024-12-05T12:04:55,537 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:04:55,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc}] 2024-12-05T12:04:55,538 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,539 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T12:04:55,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-05T12:04:55,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-05T12:04:55,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:04:55,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:04:55,691 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 41a242823f4e7d8b34700dd3c99d5f0e 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-05T12:04:55,691 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 6ef92a5f61de09d8e11a7a0b018f01dc 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-05T12:04:55,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc is 71, key is 135c53c5b703c8d37f015cf162ca0da3/cf:q/1733400295460/Put/seqid=0 2024-12-05T12:04:55,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e is 69, key is 0d0ad637df2d315ad83c76c67732f78c9/cf:q/1733400295456/Put/seqid=0 2024-12-05T12:04:55,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742046_1222 (size=8311) 2024-12-05T12:04:55,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742046_1222 (size=8311) 2024-12-05T12:04:55,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742046_1222 (size=8311) 2024-12-05T12:04:55,731 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
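The SnapshotProcedure registered above (pid=104, type=FLUSH) and its per-region subprocedures (pid=105, 106) are the master-side handling of a client snapshot request. A minimal sketch of that request, using the snapshot and table names shown in the log; the surrounding test harness is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Takes a FLUSH-type snapshot of an enabled table; the call blocks while the
      // master runs the procedure, which is why the client keeps polling
      // "Checking to see if procedure is done pid=104" in the entries above.
      admin.snapshot("snaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}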
2024-12-05T12:04:55,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/.tmp/cf/c074d3961d2a4e33a67f8e63a1b74579, store: [table=testtb-testExportFileSystemState family=cf region=6ef92a5f61de09d8e11a7a0b018f01dc] 2024-12-05T12:04:55,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/.tmp/cf/c074d3961d2a4e33a67f8e63a1b74579 is 209, key is 1893c7399e8190f1a7f62a4172d7920fe/cf:q/1733400295460/Put/seqid=0 2024-12-05T12:04:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742047_1223 (size=4964) 2024-12-05T12:04:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742047_1223 (size=4964) 2024-12-05T12:04:55,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742047_1223 (size=4964) 2024-12-05T12:04:55,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:55,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742048_1224 (size=15407) 2024-12-05T12:04:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742048_1224 (size=15407) 2024-12-05T12:04:55,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/.tmp/cf/314b7be63bf5412e9fe88aeffb3dc30b, store: [table=testtb-testExportFileSystemState family=cf region=41a242823f4e7d8b34700dd3c99d5f0e] 2024-12-05T12:04:55,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742048_1224 (size=15407) 2024-12-05T12:04:55,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/.tmp/cf/c074d3961d2a4e33a67f8e63a1b74579 2024-12-05T12:04:55,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/.tmp/cf/314b7be63bf5412e9fe88aeffb3dc30b is 209, key is 0d0ad637df2d315ad83c76c67732f78c9/cf:q/1733400295456/Put/seqid=0 2024-12-05T12:04:55,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/.tmp/cf/c074d3961d2a4e33a67f8e63a1b74579 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579 2024-12-05T12:04:55,787 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579, entries=49, sequenceid=6, filesize=15.0 K 2024-12-05T12:04:55,788 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 6ef92a5f61de09d8e11a7a0b018f01dc in 97ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:55,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 6ef92a5f61de09d8e11a7a0b018f01dc: 2024-12-05T12:04:55,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. for snaptb0-testExportFileSystemState completed. 
2024-12-05T12:04:55,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-05T12:04:55,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:55,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579] hfiles 2024-12-05T12:04:55,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579 for snapshot=snaptb0-testExportFileSystemState 2024-12-05T12:04:55,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742049_1225 (size=5504) 2024-12-05T12:04:55,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742049_1225 (size=5504) 2024-12-05T12:04:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742049_1225 (size=5504) 2024-12-05T12:04:55,799 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/.tmp/cf/314b7be63bf5412e9fe88aeffb3dc30b 2024-12-05T12:04:55,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/.tmp/cf/314b7be63bf5412e9fe88aeffb3dc30b as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b 2024-12-05T12:04:55,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b, entries=1, sequenceid=6, filesize=5.4 K 2024-12-05T12:04:55,812 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 
41a242823f4e7d8b34700dd3c99d5f0e in 121ms, sequenceid=6, compaction requested=false 2024-12-05T12:04:55,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 41a242823f4e7d8b34700dd3c99d5f0e: 2024-12-05T12:04:55,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. for snaptb0-testExportFileSystemState completed. 2024-12-05T12:04:55,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-05T12:04:55,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:04:55,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b] hfiles 2024-12-05T12:04:55,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b for snapshot=snaptb0-testExportFileSystemState 2024-12-05T12:04:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742050_1226 (size=110) 2024-12-05T12:04:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742050_1226 (size=110) 2024-12-05T12:04:55,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742050_1226 (size=110) 2024-12-05T12:04:55,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 
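A FLUSH-type snapshot forces each region's memstore to disk before its store files are referenced, which is what the HRegion "Flushing"/"Finished flush" and HStore "Added ... sequenceid=6" entries above record. For comparison, a hedged sketch of an explicit client-side flush of the same table (the snapshot procedure performs its own flush; this is only the equivalent standalone call):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes the memstores of every region of the table to new store files,
      // the same per-region effect the snapshot procedure produces above.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}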
2024-12-05T12:04:55,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-05T12:04:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-05T12:04:55,815 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,817 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc in 279 msec 2024-12-05T12:04:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T12:04:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742051_1227 (size=110) 2024-12-05T12:04:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742051_1227 (size=110) 2024-12-05T12:04:55,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742051_1227 (size=110) 2024-12-05T12:04:55,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 
2024-12-05T12:04:55,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-05T12:04:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-05T12:04:55,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,838 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-05T12:04:55,846 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:04:55,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e in 306 msec 2024-12-05T12:04:55,850 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:04:55,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:04:55,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:04:55,852 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:04:55,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e] hfiles 2024-12-05T12:04:55,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:04:55,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:04:55,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742052_1228 (size=294) 2024-12-05T12:04:55,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742052_1228 (size=294) 2024-12-05T12:04:55,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742052_1228 (size=294) 2024-12-05T12:04:55,871 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:04:55,871 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-05T12:04:55,871 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T12:04:55,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742053_1229 (size=963) 2024-12-05T12:04:55,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742053_1229 (size=963) 2024-12-05T12:04:55,891 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742053_1229 (size=963) 2024-12-05T12:04:55,897 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:04:55,905 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:04:55,906 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T12:04:55,908 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:04:55,908 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-05T12:04:55,909 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 400 msec 2024-12-05T12:04:56,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-05T12:04:56,138 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T12:04:56,138 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138 2024-12-05T12:04:56,138 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:56,169 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:04:56,169 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T12:04:56,171 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:04:56,176 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-05T12:04:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742054_1230 (size=165) 2024-12-05T12:04:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742055_1231 (size=963) 2024-12-05T12:04:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742054_1230 (size=165) 2024-12-05T12:04:56,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742054_1230 (size=165) 2024-12-05T12:04:56,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742055_1231 (size=963) 2024-12-05T12:04:56,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742055_1231 (size=963) 2024-12-05T12:04:56,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:56,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:56,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-707813493272361149.jar 2024-12-05T12:04:57,273 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,274 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-10668998242143904933.jar 2024-12-05T12:04:57,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:04:57,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:04:57,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:04:57,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:04:57,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:04:57,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:04:57,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:04:57,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:04:57,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:04:57,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:04:57,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:04:57,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:04:57,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:57,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:57,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:57,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:57,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:04:57,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:57,369 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:04:57,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742056_1232 (size=131440) 2024-12-05T12:04:57,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742056_1232 (size=131440) 2024-12-05T12:04:57,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742056_1232 (size=131440) 2024-12-05T12:04:57,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742057_1233 (size=4188619) 2024-12-05T12:04:57,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742057_1233 (size=4188619) 2024-12-05T12:04:57,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742057_1233 (size=4188619) 2024-12-05T12:04:57,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742058_1234 (size=1323991) 2024-12-05T12:04:57,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742058_1234 (size=1323991) 2024-12-05T12:04:57,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742058_1234 (size=1323991) 2024-12-05T12:04:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742059_1235 (size=903934) 2024-12-05T12:04:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742059_1235 (size=903934) 2024-12-05T12:04:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742059_1235 (size=903934) 2024-12-05T12:04:57,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742060_1236 (size=8360360) 2024-12-05T12:04:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742060_1236 (size=8360360) 2024-12-05T12:04:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742060_1236 (size=8360360) 2024-12-05T12:04:57,526 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742061_1237 (size=1877034) 2024-12-05T12:04:57,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742061_1237 (size=1877034) 2024-12-05T12:04:57,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742061_1237 (size=1877034) 2024-12-05T12:04:57,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742062_1238 (size=77835) 2024-12-05T12:04:57,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742062_1238 (size=77835) 2024-12-05T12:04:57,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742062_1238 (size=77835) 2024-12-05T12:04:57,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742063_1239 (size=30949) 2024-12-05T12:04:57,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742063_1239 (size=30949) 2024-12-05T12:04:57,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742063_1239 (size=30949) 2024-12-05T12:04:57,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742064_1240 (size=1597213) 2024-12-05T12:04:57,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742064_1240 (size=1597213) 2024-12-05T12:04:57,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742064_1240 (size=1597213) 2024-12-05T12:04:57,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742065_1241 (size=4695811) 2024-12-05T12:04:57,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742065_1241 (size=4695811) 2024-12-05T12:04:57,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742065_1241 (size=4695811) 2024-12-05T12:04:57,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742066_1242 (size=232957) 2024-12-05T12:04:57,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742066_1242 (size=232957) 2024-12-05T12:04:57,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742066_1242 (size=232957) 2024-12-05T12:04:57,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742067_1243 (size=127628) 2024-12-05T12:04:57,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742067_1243 (size=127628) 2024-12-05T12:04:57,627 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742067_1243 (size=127628) 2024-12-05T12:04:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742068_1244 (size=20406) 2024-12-05T12:04:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742068_1244 (size=20406) 2024-12-05T12:04:57,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742068_1244 (size=20406) 2024-12-05T12:04:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742069_1245 (size=5175431) 2024-12-05T12:04:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742069_1245 (size=5175431) 2024-12-05T12:04:57,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742069_1245 (size=5175431) 2024-12-05T12:04:57,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742070_1246 (size=217634) 2024-12-05T12:04:57,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742070_1246 (size=217634) 2024-12-05T12:04:57,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742070_1246 (size=217634) 2024-12-05T12:04:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742071_1247 (size=1832290) 2024-12-05T12:04:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742071_1247 (size=1832290) 2024-12-05T12:04:57,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742071_1247 (size=1832290) 2024-12-05T12:04:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742072_1248 (size=322274) 2024-12-05T12:04:57,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742072_1248 (size=322274) 2024-12-05T12:04:57,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742072_1248 (size=322274) 2024-12-05T12:04:57,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742073_1249 (size=503880) 2024-12-05T12:04:57,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742073_1249 (size=503880) 2024-12-05T12:04:57,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742073_1249 (size=503880) 2024-12-05T12:04:57,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742074_1250 (size=443172) 
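The long run of TableMapReduceUtil(972) "For class X, using jar Y" entries above, and the jar-sized blocks recorded by the surrounding addStoredBlock entries, come from HBase shipping its dependency jars with the MapReduce job. A minimal sketch of that mechanism, assuming a job object for the export (the job name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Resolves the jar that provides each class the job needs (HBase modules, ZooKeeper,
    // shaded thirdparty, metrics, ...) and adds it to the job's distributed cache; those
    // jars are what the surrounding block reports show being written into HDFS.
    TableMapReduceUtil.addDependencyJars(job);
  }
}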
2024-12-05T12:04:57,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742074_1250 (size=443172) 2024-12-05T12:04:57,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742074_1250 (size=443172) 2024-12-05T12:04:57,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742075_1251 (size=29229) 2024-12-05T12:04:57,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742075_1251 (size=29229) 2024-12-05T12:04:57,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742075_1251 (size=29229) 2024-12-05T12:04:57,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742076_1252 (size=24096) 2024-12-05T12:04:57,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742076_1252 (size=24096) 2024-12-05T12:04:57,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742076_1252 (size=24096) 2024-12-05T12:04:57,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742077_1253 (size=111872) 2024-12-05T12:04:57,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742077_1253 (size=111872) 2024-12-05T12:04:57,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742077_1253 (size=111872) 2024-12-05T12:04:57,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742078_1254 (size=6425023) 2024-12-05T12:04:57,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742078_1254 (size=6425023) 2024-12-05T12:04:57,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742078_1254 (size=6425023) 2024-12-05T12:04:57,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742079_1255 (size=45609) 2024-12-05T12:04:57,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742079_1255 (size=45609) 2024-12-05T12:04:57,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742079_1255 (size=45609) 2024-12-05T12:04:57,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742080_1256 (size=136454) 2024-12-05T12:04:57,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742080_1256 (size=136454) 2024-12-05T12:04:57,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742080_1256 (size=136454) 
2024-12-05T12:04:57,965 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T12:04:57,967 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-05T12:04:57,969 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.0 K 2024-12-05T12:04:57,970 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.1 K 2024-12-05T12:04:57,970 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.4 K 2024-12-05T12:04:57,970 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.8 K 2024-12-05T12:04:57,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742081_1257 (size=1035) 2024-12-05T12:04:57,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742081_1257 (size=1035) 2024-12-05T12:04:57,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742081_1257 (size=1035) 2024-12-05T12:04:57,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742082_1258 (size=35) 2024-12-05T12:04:57,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742082_1258 (size=35) 2024-12-05T12:04:57,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742082_1258 (size=35) 2024-12-05T12:04:58,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742083_1259 (size=304002) 2024-12-05T12:04:58,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742083_1259 (size=304002) 2024-12-05T12:04:58,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742083_1259 (size=304002) 2024-12-05T12:04:58,112 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:04:58,112 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:04:58,114 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0003_000001 (auth:SIMPLE) from 127.0.0.1:37874 2024-12-05T12:04:58,126 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000001/launch_container.sh] 2024-12-05T12:04:58,127 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000001/container_tokens] 2024-12-05T12:04:58,127 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0003/container_1733400137807_0003_01_000001/sysfs] 2024-12-05T12:04:58,987 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:59030 2024-12-05T12:04:59,263 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:05:00,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-05T12:05:00,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-05T12:05:00,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-05T12:05:00,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-05T12:05:05,864 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:05:06,492 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:45334 2024-12-05T12:05:06,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742084_1260 (size=349700) 2024-12-05T12:05:06,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742084_1260 (size=349700) 2024-12-05T12:05:06,741 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742084_1260 (size=349700) 2024-12-05T12:05:08,700 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:46098 2024-12-05T12:05:08,700 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:34896 2024-12-05T12:05:08,981 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:05:09,565 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:34898 2024-12-05T12:05:09,578 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:46110 2024-12-05T12:05:12,118 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:05:14,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742085_1261 (size=15407) 2024-12-05T12:05:14,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742085_1261 (size=15407) 2024-12-05T12:05:14,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742085_1261 (size=15407) 2024-12-05T12:05:14,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000002/launch_container.sh] 2024-12-05T12:05:14,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000002/container_tokens] 2024-12-05T12:05:14,435 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000002/sysfs] 2024-12-05T12:05:14,741 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 41a242823f4e7d8b34700dd3c99d5f0e changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:05:14,742 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6ef92a5f61de09d8e11a7a0b018f01dc changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:05:15,940 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742087_1263 (size=8311) 2024-12-05T12:05:15,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742087_1263 (size=8311) 2024-12-05T12:05:15,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742087_1263 (size=8311) 2024-12-05T12:05:16,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000003/launch_container.sh] 2024-12-05T12:05:16,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000003/container_tokens] 2024-12-05T12:05:16,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000003/sysfs] 2024-12-05T12:05:16,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742088_1264 (size=5504) 2024-12-05T12:05:16,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742088_1264 (size=5504) 2024-12-05T12:05:16,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742088_1264 (size=5504) 2024-12-05T12:05:16,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742089_1265 (size=4964) 2024-12-05T12:05:16,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742089_1265 (size=4964) 2024-12-05T12:05:16,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742089_1265 (size=4964) 2024-12-05T12:05:16,432 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000004/launch_container.sh] 2024-12-05T12:05:16,432 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000004/container_tokens] 2024-12-05T12:05:16,432 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000004/sysfs] 2024-12-05T12:05:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742086_1262 (size=31754) 2024-12-05T12:05:16,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742086_1262 (size=31754) 2024-12-05T12:05:16,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742086_1262 (size=31754) 2024-12-05T12:05:16,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742090_1266 (size=466) 2024-12-05T12:05:16,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742090_1266 (size=466) 2024-12-05T12:05:16,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742090_1266 (size=466) 2024-12-05T12:05:16,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742091_1267 (size=31754) 2024-12-05T12:05:16,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742091_1267 (size=31754) 2024-12-05T12:05:16,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742091_1267 (size=31754) 2024-12-05T12:05:16,571 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000005/launch_container.sh] 2024-12-05T12:05:16,571 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000005/container_tokens] 2024-12-05T12:05:16,571 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000005/sysfs] 
2024-12-05T12:05:16,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742092_1268 (size=349700) 2024-12-05T12:05:16,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742092_1268 (size=349700) 2024-12-05T12:05:16,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742092_1268 (size=349700) 2024-12-05T12:05:18,236 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:05:18,237 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:05:18,249 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-05T12:05:18,249 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:05:18,250 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:05:18,250 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T12:05:18,251 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-05T12:05:18,251 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-05T12:05:18,251 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-05T12:05:18,252 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-05T12:05:18,252 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400296138/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-05T12:05:18,261 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-05T12:05:18,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testExportFileSystemState 2024-12-05T12:05:18,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T12:05:18,267 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400318267"}]},"ts":"1733400318267"} 2024-12-05T12:05:18,270 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-05T12:05:18,270 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-05T12:05:18,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-05T12:05:18,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, UNASSIGN}] 2024-12-05T12:05:18,277 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, UNASSIGN 2024-12-05T12:05:18,277 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, UNASSIGN 2024-12-05T12:05:18,279 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=41a242823f4e7d8b34700dd3c99d5f0e, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:05:18,280 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6ef92a5f61de09d8e11a7a0b018f01dc, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:05:18,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, UNASSIGN because future has completed 2024-12-05T12:05:18,286 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:05:18,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:05:18,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, UNASSIGN because future has completed 2024-12-05T12:05:18,291 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:05:18,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:05:18,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T12:05:18,442 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:05:18,443 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:05:18,443 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 41a242823f4e7d8b34700dd3c99d5f0e, disabling compactions & flushes 2024-12-05T12:05:18,443 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:05:18,443 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:05:18,443 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. after waiting 0 ms 2024-12-05T12:05:18,443 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:05:18,446 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:05:18,446 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:05:18,446 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 6ef92a5f61de09d8e11a7a0b018f01dc, disabling compactions & flushes 2024-12-05T12:05:18,446 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:05:18,446 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 
2024-12-05T12:05:18,446 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. after waiting 0 ms 2024-12-05T12:05:18,446 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 2024-12-05T12:05:18,460 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:05:18,461 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:05:18,461 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e. 2024-12-05T12:05:18,461 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 41a242823f4e7d8b34700dd3c99d5f0e: Waiting for close lock at 1733400318443Running coprocessor pre-close hooks at 1733400318443Disabling compacts and flushes for region at 1733400318443Disabling writes for close at 1733400318443Writing region close event to WAL at 1733400318444 (+1 ms)Running coprocessor post-close hooks at 1733400318461 (+17 ms)Closed at 1733400318461 2024-12-05T12:05:18,464 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:05:18,464 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:05:18,465 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:05:18,465 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc. 
2024-12-05T12:05:18,465 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 6ef92a5f61de09d8e11a7a0b018f01dc: Waiting for close lock at 1733400318446Running coprocessor pre-close hooks at 1733400318446Disabling compacts and flushes for region at 1733400318446Disabling writes for close at 1733400318446Writing region close event to WAL at 1733400318449 (+3 ms)Running coprocessor post-close hooks at 1733400318465 (+16 ms)Closed at 1733400318465 2024-12-05T12:05:18,465 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=41a242823f4e7d8b34700dd3c99d5f0e, regionState=CLOSED 2024-12-05T12:05:18,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:05:18,469 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:05:18,471 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=6ef92a5f61de09d8e11a7a0b018f01dc, regionState=CLOSED 2024-12-05T12:05:18,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:05:18,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-05T12:05:18,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 41a242823f4e7d8b34700dd3c99d5f0e, server=80666a11a09f,42305,1733400131012 in 183 msec 2024-12-05T12:05:18,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=41a242823f4e7d8b34700dd3c99d5f0e, UNASSIGN in 198 msec 2024-12-05T12:05:18,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-05T12:05:18,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 6ef92a5f61de09d8e11a7a0b018f01dc, server=80666a11a09f,42913,1733400130845 in 187 msec 2024-12-05T12:05:18,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-05T12:05:18,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6ef92a5f61de09d8e11a7a0b018f01dc, UNASSIGN in 207 msec 2024-12-05T12:05:18,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-05T12:05:18,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 214 msec 2024-12-05T12:05:18,490 DEBUG [PEWorker-1 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400318490"}]},"ts":"1733400318490"} 2024-12-05T12:05:18,493 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-05T12:05:18,493 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-05T12:05:18,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 233 msec 2024-12-05T12:05:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-05T12:05:18,589 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T12:05:18,589 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-05T12:05:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,592 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-05T12:05:18,593 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,599 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-05T12:05:18,600 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:05:18,600 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:05:18,602 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/recovered.edits] 2024-12-05T12:05:18,602 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/recovered.edits] 2024-12-05T12:05:18,607 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/cf/c074d3961d2a4e33a67f8e63a1b74579 2024-12-05T12:05:18,607 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/cf/314b7be63bf5412e9fe88aeffb3dc30b 2024-12-05T12:05:18,610 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e/recovered.edits/9.seqid 2024-12-05T12:05:18,610 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc/recovered.edits/9.seqid 2024-12-05T12:05:18,611 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:05:18,612 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemState/41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:05:18,613 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-05T12:05:18,613 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-05T12:05:18,614 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-05T12:05:18,618 DEBUG 
[PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241205e6864bec517a400e82f90eb923382639_6ef92a5f61de09d8e11a7a0b018f01dc 2024-12-05T12:05:18,620 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120514cc49f8503c43d48c92a8040ae73a23_41a242823f4e7d8b34700dd3c99d5f0e 2024-12-05T12:05:18,620 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-05T12:05:18,623 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,627 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-05T12:05:18,630 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-05T12:05:18,632 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,632 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-05T12:05:18,632 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400318632"}]},"ts":"9223372036854775807"} 2024-12-05T12:05:18,632 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400318632"}]},"ts":"9223372036854775807"} 2024-12-05T12:05:18,635 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:05:18,635 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 41a242823f4e7d8b34700dd3c99d5f0e, NAME => 'testtb-testExportFileSystemState,,1733400294448.41a242823f4e7d8b34700dd3c99d5f0e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6ef92a5f61de09d8e11a7a0b018f01dc, NAME => 'testtb-testExportFileSystemState,1,1733400294448.6ef92a5f61de09d8e11a7a0b018f01dc.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:05:18,635 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-05T12:05:18,636 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400318635"}]},"ts":"9223372036854775807"} 2024-12-05T12:05:18,643 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-05T12:05:18,645 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-05T12:05:18,647 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 56 msec 2024-12-05T12:05:18,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,651 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,652 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T12:05:18,652 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T12:05:18,652 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T12:05:18,652 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,659 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,659 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-05T12:05:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:18,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-05T12:05:18,661 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-05T12:05:18,661 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-05T12:05:18,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-05T12:05:18,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-05T12:05:18,671 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-05T12:05:18,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-05T12:05:18,697 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=809 (was 803) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-910331659_1 at /127.0.0.1:40620 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1141419820_22 at /127.0.0.1:39936 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:57360 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:44164 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1141419820_22 at /127.0.0.1:39938 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4001 
java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39451 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 138569) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:39451 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=822 (was 814) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=866 (was 802) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 19), AvailableMemoryMB=3316 (was 2099) - AvailableMemoryMB LEAK? 
- 2024-12-05T12:05:18,697 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-05T12:05:18,719 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=809, OpenFileDescriptor=822, MaxFileDescriptor=1048576, SystemLoadAverage=866, ProcessCount=17, AvailableMemoryMB=3313 2024-12-05T12:05:18,719 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-05T12:05:18,721 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:05:18,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:05:18,724 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:05:18,724 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-05T12:05:18,725 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:05:18,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T12:05:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742093_1269 (size=440) 2024-12-05T12:05:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742093_1269 (size=440) 2024-12-05T12:05:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742093_1269 (size=440) 2024-12-05T12:05:18,741 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4e3cf411680795f234fe865ede1cac68, NAME => 'testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:18,744 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9909a1dd17e17384969b8f6599a993d2, NAME => 'testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:18,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742094_1270 (size=65) 2024-12-05T12:05:18,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742094_1270 (size=65) 2024-12-05T12:05:18,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742094_1270 (size=65) 2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 4e3cf411680795f234fe865ede1cac68, disabling compactions & flushes 2024-12-05T12:05:18,761 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. after waiting 0 ms 2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:18,761 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
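[editor's aside, not part of the captured log] The master entries above show the test creating 'testtb-testConsecutiveExports' with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and a pre-split at row key '1'. The test's own source is not included here, so the following is only a hedged sketch of how an equivalent request could be issued through the standard HBase client Admin API; the table name, family name, and split key come from the log, while the class name and Configuration/Connection setup are assumptions.

// Hedged sketch: create a table like the one logged above via the public client API.
// Names ("default:testtb-testConsecutiveExports", "cf", split key "1") come from the log;
// everything else (class name, configuration source) is assumed for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("default", "testtb-testConsecutiveExports");
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)        // IS_MOB => 'true' in the logged descriptor
          .setMobThreshold(0L)        // MOB_THRESHOLD => '0': every cell is written as a MOB
          .setMaxVersions(1)          // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)
          .build();
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(cf)
          .build();
      // One split key yields the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}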
2024-12-05T12:05:18,761 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4e3cf411680795f234fe865ede1cac68: Waiting for close lock at 1733400318761Disabling compacts and flushes for region at 1733400318761Disabling writes for close at 1733400318761Writing region close event to WAL at 1733400318761Closed at 1733400318761 2024-12-05T12:05:18,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742095_1271 (size=65) 2024-12-05T12:05:18,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742095_1271 (size=65) 2024-12-05T12:05:18,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742095_1271 (size=65) 2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 9909a1dd17e17384969b8f6599a993d2, disabling compactions & flushes 2024-12-05T12:05:18,770 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. after waiting 0 ms 2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:18,770 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 
2024-12-05T12:05:18,770 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9909a1dd17e17384969b8f6599a993d2: Waiting for close lock at 1733400318770Disabling compacts and flushes for region at 1733400318770Disabling writes for close at 1733400318770Writing region close event to WAL at 1733400318770Closed at 1733400318770 2024-12-05T12:05:18,771 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:05:18,772 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400318771"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400318771"}]},"ts":"1733400318771"} 2024-12-05T12:05:18,772 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400318771"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400318771"}]},"ts":"1733400318771"} 2024-12-05T12:05:18,774 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:05:18,776 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:05:18,776 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400318776"}]},"ts":"1733400318776"} 2024-12-05T12:05:18,778 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-05T12:05:18,778 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:05:18,780 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:05:18,780 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:05:18,780 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:05:18,780 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:05:18,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, ASSIGN}] 2024-12-05T12:05:18,782 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, ASSIGN 2024-12-05T12:05:18,782 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, ASSIGN 2024-12-05T12:05:18,783 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:05:18,783 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:05:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T12:05:18,933 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-05T12:05:18,933 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=4e3cf411680795f234fe865ede1cac68, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:05:18,933 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=9909a1dd17e17384969b8f6599a993d2, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:05:18,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, ASSIGN because future has completed 2024-12-05T12:05:18,936 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:05:18,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, ASSIGN because future has completed 2024-12-05T12:05:18,942 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:05:19,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T12:05:19,097 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:19,097 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 9909a1dd17e17384969b8f6599a993d2, NAME => 'testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:05:19,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. service=AccessControlService 2024-12-05T12:05:19,099 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:05:19,099 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,099 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:05:19,099 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,099 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,101 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:19,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 4e3cf411680795f234fe865ede1cac68, NAME => 'testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:05:19,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. service=AccessControlService 2024-12-05T12:05:19,103 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:05:19,103 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,103 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:05:19,103 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,103 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,105 INFO [StoreOpener-9909a1dd17e17384969b8f6599a993d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,107 INFO [StoreOpener-4e3cf411680795f234fe865ede1cac68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,110 INFO [StoreOpener-9909a1dd17e17384969b8f6599a993d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9909a1dd17e17384969b8f6599a993d2 columnFamilyName cf 2024-12-05T12:05:19,111 INFO [StoreOpener-4e3cf411680795f234fe865ede1cac68-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e3cf411680795f234fe865ede1cac68 columnFamilyName cf 2024-12-05T12:05:19,112 DEBUG [StoreOpener-9909a1dd17e17384969b8f6599a993d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:19,116 DEBUG [StoreOpener-4e3cf411680795f234fe865ede1cac68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:19,117 INFO [StoreOpener-9909a1dd17e17384969b8f6599a993d2-1 {}] regionserver.HStore(327): Store=9909a1dd17e17384969b8f6599a993d2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:05:19,117 INFO [StoreOpener-4e3cf411680795f234fe865ede1cac68-1 {}] regionserver.HStore(327): Store=4e3cf411680795f234fe865ede1cac68/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:05:19,117 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,118 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,118 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,118 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,119 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,121 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,121 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,124 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:05:19,125 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:05:19,125 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 9909a1dd17e17384969b8f6599a993d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63235463, jitterRate=-0.057718172669410706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:05:19,125 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,125 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 4e3cf411680795f234fe865ede1cac68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67365148, jitterRate=0.0038189291954040527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:05:19,125 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,127 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 9909a1dd17e17384969b8f6599a993d2: Running coprocessor pre-open hook at 1733400319100Writing region info on filesystem at 1733400319100Initializing all the Stores at 1733400319104 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400319104Cleaning up temporary data from old regions at 1733400319119 (+15 ms)Running coprocessor post-open hooks at 1733400319125 (+6 ms)Region opened successfully at 1733400319127 (+2 ms) 2024-12-05T12:05:19,128 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 4e3cf411680795f234fe865ede1cac68: Running coprocessor pre-open hook at 1733400319104Writing region info on 
filesystem at 1733400319104Initializing all the Stores at 1733400319106 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400319106Cleaning up temporary data from old regions at 1733400319119 (+13 ms)Running coprocessor post-open hooks at 1733400319125 (+6 ms)Region opened successfully at 1733400319128 (+3 ms) 2024-12-05T12:05:19,128 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2., pid=117, masterSystemTime=1733400319092 2024-12-05T12:05:19,131 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68., pid=118, masterSystemTime=1733400319096 2024-12-05T12:05:19,134 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=9909a1dd17e17384969b8f6599a993d2, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:05:19,138 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:19,138 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:19,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:05:19,138 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:19,138 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 
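[editor's aside, not part of the captured log] The assignment and open entries above end with both regions of testtb-testConsecutiveExports deployed, one on 80666a11a09f,42913 and one on 80666a11a09f,42305. As a hedged illustration of how a client could confirm that placement with the public RegionLocator API: only the table name is taken from the log; the class name and connection setup are assumptions.

// Hedged sketch: list region boundaries and hosting servers for the table opened above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("default", "testtb-testConsecutiveExports"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        RegionInfo ri = loc.getRegion();
        // Expect two regions, split at '1', each hosted on one of the region servers above.
        System.out.printf("region=%s start=%s end=%s server=%s%n",
            ri.getEncodedName(),
            Bytes.toStringBinary(ri.getStartKey()),
            Bytes.toStringBinary(ri.getEndKey()),
            loc.getServerName());
      }
    }
  }
}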
2024-12-05T12:05:19,139 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=4e3cf411680795f234fe865ede1cac68, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:05:19,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:05:19,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-05T12:05:19,148 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012 in 205 msec 2024-12-05T12:05:19,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-05T12:05:19,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, ASSIGN in 366 msec 2024-12-05T12:05:19,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845 in 203 msec 2024-12-05T12:05:19,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-05T12:05:19,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, ASSIGN in 368 msec 2024-12-05T12:05:19,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:05:19,152 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400319152"}]},"ts":"1733400319152"} 2024-12-05T12:05:19,155 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-05T12:05:19,157 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:05:19,158 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-05T12:05:19,162 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-05T12:05:19,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:19,201 DEBUG [pool-63-thread-1-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:19,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:19,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:05:19,209 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T12:05:19,209 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T12:05:19,209 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T12:05:19,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-05T12:05:19,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 488 msec 2024-12-05T12:05:19,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-05T12:05:19,359 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T12:05:19,359 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,362 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-05T12:05:19,362 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
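[editor's aside, not part of the captured log] The PermissionStorage and ZKPermissionWatcher entries above record the table owner 'jenkins' being granted RWXCA on the new table and that ACL being pushed to every watcher node. A hedged sketch of issuing the same kind of grant from a client with the public AccessControlClient API follows; the user, table, and actions mirror the log, while the class name and connection setup are assumptions.

// Hedged sketch: grant a user the RWXCA table permissions seen above.
// Note that AccessControlClient.grant declares 'throws Throwable'.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermsSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      AccessControlClient.grant(conn,
          TableName.valueOf("default", "testtb-testConsecutiveExports"),
          "jenkins",            // user from the log's "jenkins: RWXCA" entry
          null, null,           // no column family / qualifier restriction: whole table
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}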
2024-12-05T12:05:19,363 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:05:19,366 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,376 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,384 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T12:05:19,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400319388 (current time:1733400319388). 2024-12-05T12:05:19,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:05:19,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-05T12:05:19,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:05:19,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@310b698f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:05:19,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:05:19,391 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:05:19,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:05:19,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:05:19,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecd68be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-05T12:05:19,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:05:19,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:05:19,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,393 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:05:19,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2322ad2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:05:19,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:05:19,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,398 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56400, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,400 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:05:19,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:05:19,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,400 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:05:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ef4714b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:05:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:05:19,402 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:05:19,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:05:19,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:05:19,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cd3e9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:05:19,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:05:19,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,404 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:05:19,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1515a2e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:05:19,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:05:19,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:05:19,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,411 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52360, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,412 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:05:19,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:05:19,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,413 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:05:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-05T12:05:19,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
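The "Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]" record shows the table's permissions for user jenkins being read back from hbase:acl so they can be written into the snapshot description. A hedged sketch of how a grant of that shape could be issued through AccessControlClient; whether the test grants permissions this way is not visible in the log, and the Connection handle is an assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public final class GrantTablePerms {
      static void grantAll(Connection conn) throws Throwable {
        // READ, WRITE, EXEC, CREATE, ADMIN corresponds to the "RWXCA" string in the log;
        // null family/qualifier means the grant applies to the whole table.
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testConsecutiveExports"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }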
2024-12-05T12:05:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T12:05:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-05T12:05:19,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T12:05:19,417 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:05:19,418 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:05:19,420 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:05:19,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742096_1272 (size=161) 2024-12-05T12:05:19,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742096_1272 (size=161) 2024-12-05T12:05:19,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742096_1272 (size=161) 2024-12-05T12:05:19,428 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:05:19,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2}] 2024-12-05T12:05:19,429 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,429 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,527 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T12:05:19,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-05T12:05:19,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-05T12:05:19,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:19,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 9909a1dd17e17384969b8f6599a993d2: 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 4e3cf411680795f234fe865ede1cac68: 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. for emptySnaptb0-testConsecutiveExports completed. 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. for emptySnaptb0-testConsecutiveExports completed. 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:05:19,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:05:19,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742097_1273 (size=68) 2024-12-05T12:05:19,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742097_1273 (size=68) 2024-12-05T12:05:19,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742097_1273 (size=68) 2024-12-05T12:05:19,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:19,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-05T12:05:19,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-05T12:05:19,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,608 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,611 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 in 181 msec 2024-12-05T12:05:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742098_1274 (size=68) 2024-12-05T12:05:19,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742098_1274 (size=68) 2024-12-05T12:05:19,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742098_1274 (size=68) 2024-12-05T12:05:19,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
2024-12-05T12:05:19,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-05T12:05:19,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-05T12:05:19,620 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,620 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-12-05T12:05:19,624 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:05:19,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 in 193 msec 2024-12-05T12:05:19,625 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:05:19,627 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:05:19,627 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:05:19,627 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:19,628 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:05:19,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742099_1275 (size=60) 2024-12-05T12:05:19,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742099_1275 (size=60) 2024-12-05T12:05:19,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742099_1275 (size=60) 2024-12-05T12:05:19,640 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:05:19,640 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-05T12:05:19,641 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-05T12:05:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742100_1276 (size=641) 2024-12-05T12:05:19,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742100_1276 (size=641) 2024-12-05T12:05:19,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742100_1276 (size=641) 2024-12-05T12:05:19,670 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:05:19,688 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:05:19,689 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-05T12:05:19,690 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:05:19,690 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-05T12:05:19,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 276 msec 2024-12-05T12:05:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-05T12:05:19,738 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T12:05:19,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:05:19,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:05:19,748 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,752 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-05T12:05:19,752 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
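The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." messages above are what HRegion emits when mutations skip the write-ahead log. A minimal client-side sketch of such a write, assuming Durability.SKIP_WAL is what the test uses; the row key and value below are made up, and only the table name and the cf:q column come from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SkipWalPut {
      static void writeWithoutWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the "WAL disabled" warning path above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }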
2024-12-05T12:05:19,752 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:05:19,754 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,760 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,766 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-05T12:05:19,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T12:05:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400319769 (current time:1733400319769). 2024-12-05T12:05:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:05:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-05T12:05:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:05:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31b8b2cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:05:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:05:19,771 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:05:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:05:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:05:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3a577b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-05T12:05:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:05:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:05:19,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,772 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46170, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:05:19,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7bc1b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:05:19,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:05:19,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,776 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:05:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:05:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,777 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:05:19,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b4a1f37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:05:19,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:05:19,779 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543bd5cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:05:19,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,781 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:05:19,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f99c6e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:05:19,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:05:19,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:05:19,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:05:19,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:05:19,788 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:05:19,790 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:05:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:05:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:05:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-05T12:05:19,791 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:05:19,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T12:05:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-05T12:05:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-05T12:05:19,793 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:05:19,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T12:05:19,795 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:05:19,798 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:05:19,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742101_1277 (size=156) 2024-12-05T12:05:19,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742101_1277 (size=156) 2024-12-05T12:05:19,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742101_1277 (size=156) 2024-12-05T12:05:19,805 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:05:19,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2}] 2024-12-05T12:05:19,806 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:19,806 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:19,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T12:05:19,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-05T12:05:19,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-05T12:05:19,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:05:19,958 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:05:19,959 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 9909a1dd17e17384969b8f6599a993d2 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-05T12:05:19,959 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 4e3cf411680795f234fe865ede1cac68 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-05T12:05:19,980 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 is 69, key is 01e861c731451808f7812b777c9f042ed/cf:q/1733400319744/Put/seqid=0 2024-12-05T12:05:19,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 is 71, key is 113c1cbc026597a1c59fea27a3cc407e/cf:q/1733400319747/Put/seqid=0 2024-12-05T12:05:19,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742102_1278 (size=4964) 2024-12-05T12:05:19,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742102_1278 (size=4964) 2024-12-05T12:05:19,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742102_1278 (size=4964) 2024-12-05T12:05:19,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:20,004 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:20,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/.tmp/cf/33ccfe4260444d849c076f193d9dcbee, store: [table=testtb-testConsecutiveExports family=cf region=4e3cf411680795f234fe865ede1cac68] 2024-12-05T12:05:20,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/.tmp/cf/33ccfe4260444d849c076f193d9dcbee is 206, key is 01e861c731451808f7812b777c9f042ed/cf:q/1733400319744/Put/seqid=0 2024-12-05T12:05:20,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742103_1279 (size=8311) 2024-12-05T12:05:20,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742103_1279 (size=8311) 2024-12-05T12:05:20,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742103_1279 (size=8311) 2024-12-05T12:05:20,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:20,028 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:20,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/.tmp/cf/bb28d51274ff40288ad221ce8750d50d, store: [table=testtb-testConsecutiveExports family=cf region=9909a1dd17e17384969b8f6599a993d2] 2024-12-05T12:05:20,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/.tmp/cf/bb28d51274ff40288ad221ce8750d50d is 206, key is 16ab44080e2b0792f89b1b8aa6fc00d6e/cf:q/1733400319747/Put/seqid=0 2024-12-05T12:05:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742104_1280 (size=5498) 2024-12-05T12:05:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742104_1280 (size=5498) 2024-12-05T12:05:20,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742104_1280 (size=5498) 2024-12-05T12:05:20,047 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/.tmp/cf/33ccfe4260444d849c076f193d9dcbee 2024-12-05T12:05:20,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/.tmp/cf/33ccfe4260444d849c076f193d9dcbee as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee 2024-12-05T12:05:20,063 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee, entries=1, sequenceid=6, filesize=5.4 K 2024-12-05T12:05:20,064 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 4e3cf411680795f234fe865ede1cac68 in 105ms, sequenceid=6, compaction requested=false 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 4e3cf411680795f234fe865ede1cac68: 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. for snaptb0-testConsecutiveExports completed. 
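The flush paths under mobdir/.tmp and mobdir/data above appear because the 'cf' family of this table is MOB-enabled: values over the MOB threshold are flushed as separate MOB files before the regular store file is committed into the region directory. A hedged sketch of declaring such a family; the threshold value is an assumption for illustration, since the test's real setting is not in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MobTableSetup {
      static void createMobTable(Admin admin) throws Exception {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // large values go to the MOB area (mobdir), as seen above
                .setMobThreshold(10L)  // assumed threshold, purely illustrative
                .build())
            .build());
      }
    }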
2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee] hfiles 2024-12-05T12:05:20,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee for snapshot=snaptb0-testConsecutiveExports 2024-12-05T12:05:20,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742105_1281 (size=15257) 2024-12-05T12:05:20,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742105_1281 (size=15257) 2024-12-05T12:05:20,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742105_1281 (size=15257) 2024-12-05T12:05:20,073 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/.tmp/cf/bb28d51274ff40288ad221ce8750d50d 2024-12-05T12:05:20,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/.tmp/cf/bb28d51274ff40288ad221ce8750d50d as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d 2024-12-05T12:05:20,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d, entries=49, sequenceid=6, filesize=14.9 K 2024-12-05T12:05:20,088 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 
9909a1dd17e17384969b8f6599a993d2 in 129ms, sequenceid=6, compaction requested=false 2024-12-05T12:05:20,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 9909a1dd17e17384969b8f6599a993d2: 2024-12-05T12:05:20,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. for snaptb0-testConsecutiveExports completed. 2024-12-05T12:05:20,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-05T12:05:20,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:05:20,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d] hfiles 2024-12-05T12:05:20,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d for snapshot=snaptb0-testConsecutiveExports 2024-12-05T12:05:20,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T12:05:20,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742106_1282 (size=107) 2024-12-05T12:05:20,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742106_1282 (size=107) 2024-12-05T12:05:20,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742106_1282 (size=107) 2024-12-05T12:05:20,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
2024-12-05T12:05:20,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-05T12:05:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-05T12:05:20,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:20,130 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:20,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e3cf411680795f234fe865ede1cac68 in 326 msec 2024-12-05T12:05:20,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742107_1283 (size=107) 2024-12-05T12:05:20,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742107_1283 (size=107) 2024-12-05T12:05:20,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742107_1283 (size=107) 2024-12-05T12:05:20,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 
2024-12-05T12:05:20,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-05T12:05:20,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-05T12:05:20,148 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:20,148 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:20,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-05T12:05:20,152 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:05:20,152 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9909a1dd17e17384969b8f6599a993d2 in 344 msec 2024-12-05T12:05:20,154 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:05:20,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
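For context on the SnapshotProcedure states recorded above (SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, SNAPSHOT_SNAPSHOT_MOB_REGION, ...): a FLUSH-type snapshot such as snaptb0-testConsecutiveExports is ordinarily requested through the HBase client Admin API, and that single call is what drives this master-side procedure. A minimal sketch follows; the snapshot and table names are taken from the log, while the ZooKeeper quorum setting and the standalone main() wrapper are placeholder assumptions, not details of this run (the test talks to an in-process mini cluster).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder quorum; the test run above uses an in-process mini cluster instead.
        conf.set("hbase.zookeeper.quorum", "localhost");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // FLUSH-type snapshot: memstores are flushed first, then the resulting hfiles are
          // referenced in the snapshot manifest -- the type=FLUSH flow visible in the log.
          admin.snapshot("snaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"),
              SnapshotType.FLUSH);
        }
      }
    }
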
2024-12-05T12:05:20,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:05:20,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:05:20,163 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68] hfiles 2024-12-05T12:05:20,163 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:05:20,163 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 2024-12-05T12:05:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742108_1284 (size=291) 2024-12-05T12:05:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742108_1284 (size=291) 2024-12-05T12:05:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742108_1284 (size=291) 2024-12-05T12:05:20,175 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:05:20,175 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-05T12:05:20,176 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T12:05:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742109_1285 (size=951) 2024-12-05T12:05:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742109_1285 (size=951) 2024-12-05T12:05:20,196 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742109_1285 (size=951) 2024-12-05T12:05:20,205 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:05:20,213 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:05:20,215 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T12:05:20,225 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:05:20,225 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-05T12:05:20,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 434 msec 2024-12-05T12:05:20,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-05T12:05:20,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-05T12:05:20,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-05T12:05:20,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-05T12:05:20,421 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T12:05:20,421 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421 2024-12-05T12:05:20,422 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, 
tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:20,462 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:20,462 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@684dc976, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T12:05:20,464 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:05:20,475 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T12:05:20,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:20,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:20,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-9870306913139861728.jar 2024-12-05T12:05:21,560 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-3280900942127921473.jar 2024-12-05T12:05:21,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:21,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:05:21,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:05:21,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:05:21,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:05:21,624 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:05:21,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:05:21,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:05:21,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:05:21,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:05:21,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:05:21,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:05:21,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:21,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:21,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:21,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:21,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:21,627 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:21,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742110_1286 (size=131440) 2024-12-05T12:05:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742110_1286 (size=131440) 2024-12-05T12:05:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742110_1286 (size=131440) 2024-12-05T12:05:21,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742111_1287 (size=4188619) 2024-12-05T12:05:21,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742111_1287 (size=4188619) 2024-12-05T12:05:21,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742111_1287 (size=4188619) 2024-12-05T12:05:21,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742112_1288 (size=1323991) 2024-12-05T12:05:21,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742112_1288 (size=1323991) 2024-12-05T12:05:21,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742112_1288 (size=1323991) 2024-12-05T12:05:21,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742113_1289 (size=6425023) 2024-12-05T12:05:21,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742113_1289 (size=6425023) 2024-12-05T12:05:21,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742113_1289 (size=6425023) 2024-12-05T12:05:21,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742114_1290 (size=903934) 2024-12-05T12:05:21,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742114_1290 (size=903934) 2024-12-05T12:05:21,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742114_1290 (size=903934) 2024-12-05T12:05:21,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742115_1291 (size=8360360) 2024-12-05T12:05:21,749 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742115_1291 (size=8360360) 2024-12-05T12:05:21,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742115_1291 (size=8360360) 2024-12-05T12:05:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742116_1292 (size=1877034) 2024-12-05T12:05:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742116_1292 (size=1877034) 2024-12-05T12:05:21,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742116_1292 (size=1877034) 2024-12-05T12:05:21,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742117_1293 (size=77835) 2024-12-05T12:05:21,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742117_1293 (size=77835) 2024-12-05T12:05:21,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742117_1293 (size=77835) 2024-12-05T12:05:21,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742118_1294 (size=30949) 2024-12-05T12:05:21,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742118_1294 (size=30949) 2024-12-05T12:05:21,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742118_1294 (size=30949) 2024-12-05T12:05:21,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742119_1295 (size=1597213) 2024-12-05T12:05:21,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742119_1295 (size=1597213) 2024-12-05T12:05:21,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742119_1295 (size=1597213) 2024-12-05T12:05:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742120_1296 (size=4695811) 2024-12-05T12:05:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742120_1296 (size=4695811) 2024-12-05T12:05:21,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742120_1296 (size=4695811) 2024-12-05T12:05:21,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742121_1297 (size=232957) 2024-12-05T12:05:21,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742121_1297 (size=232957) 2024-12-05T12:05:21,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742121_1297 (size=232957) 2024-12-05T12:05:21,812 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742122_1298 (size=127628) 2024-12-05T12:05:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742122_1298 (size=127628) 2024-12-05T12:05:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742122_1298 (size=127628) 2024-12-05T12:05:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742123_1299 (size=20406) 2024-12-05T12:05:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742123_1299 (size=20406) 2024-12-05T12:05:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742123_1299 (size=20406) 2024-12-05T12:05:21,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742124_1300 (size=5175431) 2024-12-05T12:05:21,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742124_1300 (size=5175431) 2024-12-05T12:05:21,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742124_1300 (size=5175431) 2024-12-05T12:05:21,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742125_1301 (size=443172) 2024-12-05T12:05:21,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742125_1301 (size=443172) 2024-12-05T12:05:21,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742125_1301 (size=443172) 2024-12-05T12:05:21,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742126_1302 (size=217634) 2024-12-05T12:05:21,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742126_1302 (size=217634) 2024-12-05T12:05:21,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742126_1302 (size=217634) 2024-12-05T12:05:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742127_1303 (size=1832290) 2024-12-05T12:05:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742127_1303 (size=1832290) 2024-12-05T12:05:21,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742127_1303 (size=1832290) 2024-12-05T12:05:21,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742128_1304 (size=322274) 2024-12-05T12:05:21,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742128_1304 (size=322274) 2024-12-05T12:05:21,871 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742128_1304 (size=322274) 2024-12-05T12:05:21,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742129_1305 (size=503880) 2024-12-05T12:05:21,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742129_1305 (size=503880) 2024-12-05T12:05:21,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742129_1305 (size=503880) 2024-12-05T12:05:21,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742130_1306 (size=29229) 2024-12-05T12:05:21,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742130_1306 (size=29229) 2024-12-05T12:05:21,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742130_1306 (size=29229) 2024-12-05T12:05:21,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742131_1307 (size=24096) 2024-12-05T12:05:21,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742131_1307 (size=24096) 2024-12-05T12:05:21,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742131_1307 (size=24096) 2024-12-05T12:05:21,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742132_1308 (size=111872) 2024-12-05T12:05:21,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742132_1308 (size=111872) 2024-12-05T12:05:21,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742132_1308 (size=111872) 2024-12-05T12:05:21,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742133_1309 (size=45609) 2024-12-05T12:05:21,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742133_1309 (size=45609) 2024-12-05T12:05:21,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742133_1309 (size=45609) 2024-12-05T12:05:21,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742134_1310 (size=136454) 2024-12-05T12:05:21,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742134_1310 (size=136454) 2024-12-05T12:05:21,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742134_1310 (size=136454) 2024-12-05T12:05:21,910 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
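The mapreduce.TableMapReduceUtil entries above show ExportSnapshot resolving the jars its MapReduce copy job depends on, and the JobResourceUploader warning about a missing job jar is expected when the job is assembled inside the test JVM. Outside of this test, the same export to a local filesystem target is usually driven through the ExportSnapshot tool; a minimal sketch using ToolRunner follows. The flags are the tool's standard -snapshot/-copy-to/-overwrite options, and the target directory is a placeholder rather than the path used in this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent command line:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export -overwrite
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export",  // placeholder target, not the path from this run
            "-overwrite"                             // allow re-export over an existing copy
        });
        System.exit(rc);
      }
    }
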
2024-12-05T12:05:21,912 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-05T12:05:21,914 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.9 K 2024-12-05T12:05:21,914 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.1 K 2024-12-05T12:05:21,914 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.4 K 2024-12-05T12:05:21,914 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.8 K 2024-12-05T12:05:21,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742135_1311 (size=1023) 2024-12-05T12:05:21,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742135_1311 (size=1023) 2024-12-05T12:05:21,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742135_1311 (size=1023) 2024-12-05T12:05:21,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742136_1312 (size=35) 2024-12-05T12:05:21,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742136_1312 (size=35) 2024-12-05T12:05:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742136_1312 (size=35) 2024-12-05T12:05:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742137_1313 (size=304041) 2024-12-05T12:05:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742137_1313 (size=304041) 2024-12-05T12:05:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742137_1313 (size=304041) 2024-12-05T12:05:22,652 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:05:22,652 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:05:22,655 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0004_000001 (auth:SIMPLE) from 127.0.0.1:57870 2024-12-05T12:05:22,664 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000001/launch_container.sh] 2024-12-05T12:05:22,664 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000001/container_tokens] 2024-12-05T12:05:22,664 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0004/container_1733400137807_0004_01_000001/sysfs] 2024-12-05T12:05:23,557 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:51670 2024-12-05T12:05:23,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:05:30,764 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:52670 2024-12-05T12:05:31,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742138_1314 (size=349739) 2024-12-05T12:05:31,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742138_1314 (size=349739) 2024-12-05T12:05:31,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742138_1314 (size=349739) 2024-12-05T12:05:32,961 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:49242 2024-12-05T12:05:32,961 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:38168 2024-12-05T12:05:33,855 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:49244 2024-12-05T12:05:33,862 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:38172 2024-12-05T12:05:36,657 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:05:37,987 WARN 
[ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000002/launch_container.sh] 2024-12-05T12:05:37,987 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000002/container_tokens] 2024-12-05T12:05:37,988 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000002/sysfs] 2024-12-05T12:05:38,982 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:05:39,055 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000003/launch_container.sh] 2024-12-05T12:05:39,055 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000003/container_tokens] 2024-12-05T12:05:39,056 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000003/sysfs] 2024-12-05T12:05:39,716 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000005/launch_container.sh] 2024-12-05T12:05:39,716 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000005/container_tokens] 2024-12-05T12:05:39,716 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000005/sysfs] 2024-12-05T12:05:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742139_1315 (size=31813) 2024-12-05T12:05:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742139_1315 (size=31813) 2024-12-05T12:05:39,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742139_1315 (size=31813) 2024-12-05T12:05:39,797 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000004/launch_container.sh] 2024-12-05T12:05:39,797 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000004/container_tokens] 2024-12-05T12:05:39,797 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000004/sysfs] 2024-12-05T12:05:40,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742140_1316 (size=463) 2024-12-05T12:05:40,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742140_1316 (size=463) 2024-12-05T12:05:40,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742140_1316 (size=463) 2024-12-05T12:05:40,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742141_1317 (size=31813) 2024-12-05T12:05:40,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742141_1317 (size=31813) 2024-12-05T12:05:40,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742141_1317 (size=31813) 
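The entries that follow ('Finalize the Snapshot Export', then the .snapshotinfo and data.manifest listings) are the verification pass over the exported copy before the test starts its second, consecutive export. An exported snapshot can also be inspected by hand with the SnapshotInfo tool; a sketch under the same placeholder assumptions as above (the flag names reflect SnapshotInfo's usual options, and the -remote-dir value stands in for the export target root, not the actual path from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.SnapshotInfo;
    import org.apache.hadoop.util.ToolRunner;

    public class InspectExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Print the snapshot summary and its file list from the exported copy.
        // -remote-dir points at the export root, i.e. the directory holding the
        // .hbase-snapshot layout that ExportSnapshot wrote (placeholder path here).
        int rc = ToolRunner.run(conf, new SnapshotInfo(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-remote-dir", "file:///tmp/local-export",
            "-files"
        });
        System.exit(rc);
      }
    }
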
2024-12-05T12:05:40,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742142_1318 (size=349739) 2024-12-05T12:05:40,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742142_1318 (size=349739) 2024-12-05T12:05:40,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742142_1318 (size=349739) 2024-12-05T12:05:42,142 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:05:42,142 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:05:42,146 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-05T12:05:42,146 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:05:42,146 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:05:42,146 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T12:05:42,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T12:05:42,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T12:05:42,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@684dc976 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T12:05:42,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T12:05:42,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T12:05:42,148 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, 
rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:42,180 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:05:42,180 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@684dc976, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T12:05:42,182 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:05:42,188 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-05T12:05:42,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:42,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:42,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-9397020438512297774.jar 2024-12-05T12:05:43,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,342 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-780840190047997461.jar 2024-12-05T12:05:43,342 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,342 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:05:43,343 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:05:43,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:05:43,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:05:43,344 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:05:43,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:05:43,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:05:43,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:05:43,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:05:43,346 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:05:43,346 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:05:43,346 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:05:43,346 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:43,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:43,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:43,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:43,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:05:43,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:43,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:05:43,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742143_1319 (size=131440) 2024-12-05T12:05:43,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742143_1319 (size=131440) 2024-12-05T12:05:43,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742143_1319 (size=131440) 2024-12-05T12:05:43,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742144_1320 (size=4188619) 2024-12-05T12:05:43,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742144_1320 (size=4188619) 2024-12-05T12:05:43,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742144_1320 (size=4188619) 2024-12-05T12:05:43,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742145_1321 (size=1323991) 2024-12-05T12:05:43,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742145_1321 (size=1323991) 2024-12-05T12:05:43,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742145_1321 (size=1323991) 2024-12-05T12:05:43,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742146_1322 (size=903934) 2024-12-05T12:05:43,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742146_1322 (size=903934) 2024-12-05T12:05:43,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742146_1322 (size=903934) 2024-12-05T12:05:43,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742147_1323 (size=8360360) 2024-12-05T12:05:43,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742147_1323 (size=8360360) 2024-12-05T12:05:43,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742147_1323 (size=8360360) 2024-12-05T12:05:43,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742148_1324 (size=1877034) 2024-12-05T12:05:43,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742148_1324 (size=1877034) 
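[editor's note] The run of mapreduce.TableMapReduceUtil(972) DEBUG entries above records dependency-jar resolution for the export job: for each class the job needs, the utility locates the jar that provides it and stages it with the job (the subsequent addStoredBlock entries are those jars landing in HDFS). A minimal sketch of how this is typically wired on the client side, assuming an hbase-mapreduce classpath; the class and job names below are hypothetical and the test's actual setup is not shown in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static Job newJobWithHBaseDeps() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch"); // hypothetical job name
    // Resolves the jar that provides each required class and adds it to the job's
    // distributed cache, which is what produces the "For class X, using jar Y"
    // DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}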
2024-12-05T12:05:43,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742148_1324 (size=1877034) 2024-12-05T12:05:43,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742149_1325 (size=77835) 2024-12-05T12:05:43,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742149_1325 (size=77835) 2024-12-05T12:05:43,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742149_1325 (size=77835) 2024-12-05T12:05:43,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742150_1326 (size=30949) 2024-12-05T12:05:43,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742150_1326 (size=30949) 2024-12-05T12:05:43,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742150_1326 (size=30949) 2024-12-05T12:05:43,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742151_1327 (size=1597213) 2024-12-05T12:05:43,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742151_1327 (size=1597213) 2024-12-05T12:05:43,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742151_1327 (size=1597213) 2024-12-05T12:05:43,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742152_1328 (size=4695811) 2024-12-05T12:05:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742152_1328 (size=4695811) 2024-12-05T12:05:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742152_1328 (size=4695811) 2024-12-05T12:05:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742153_1329 (size=232957) 2024-12-05T12:05:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742153_1329 (size=232957) 2024-12-05T12:05:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742153_1329 (size=232957) 2024-12-05T12:05:43,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742154_1330 (size=127628) 2024-12-05T12:05:43,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742154_1330 (size=127628) 2024-12-05T12:05:43,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742154_1330 (size=127628) 2024-12-05T12:05:43,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742155_1331 
(size=20406) 2024-12-05T12:05:43,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742155_1331 (size=20406) 2024-12-05T12:05:43,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742155_1331 (size=20406) 2024-12-05T12:05:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742156_1332 (size=5175431) 2024-12-05T12:05:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742156_1332 (size=5175431) 2024-12-05T12:05:43,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742156_1332 (size=5175431) 2024-12-05T12:05:43,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742157_1333 (size=6425023) 2024-12-05T12:05:43,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742157_1333 (size=6425023) 2024-12-05T12:05:43,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742157_1333 (size=6425023) 2024-12-05T12:05:43,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742158_1334 (size=217634) 2024-12-05T12:05:43,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742158_1334 (size=217634) 2024-12-05T12:05:43,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742158_1334 (size=217634) 2024-12-05T12:05:43,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742159_1335 (size=1832290) 2024-12-05T12:05:43,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742159_1335 (size=1832290) 2024-12-05T12:05:43,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742159_1335 (size=1832290) 2024-12-05T12:05:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742160_1336 (size=322274) 2024-12-05T12:05:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742160_1336 (size=322274) 2024-12-05T12:05:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742160_1336 (size=322274) 2024-12-05T12:05:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742161_1337 (size=503880) 2024-12-05T12:05:43,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742161_1337 (size=503880) 2024-12-05T12:05:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to 
blk_1073742161_1337 (size=503880) 2024-12-05T12:05:43,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742162_1338 (size=29229) 2024-12-05T12:05:43,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742162_1338 (size=29229) 2024-12-05T12:05:43,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742162_1338 (size=29229) 2024-12-05T12:05:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742163_1339 (size=443172) 2024-12-05T12:05:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742163_1339 (size=443172) 2024-12-05T12:05:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742163_1339 (size=443172) 2024-12-05T12:05:43,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742164_1340 (size=24096) 2024-12-05T12:05:43,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742164_1340 (size=24096) 2024-12-05T12:05:43,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742164_1340 (size=24096) 2024-12-05T12:05:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742165_1341 (size=111872) 2024-12-05T12:05:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742165_1341 (size=111872) 2024-12-05T12:05:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742165_1341 (size=111872) 2024-12-05T12:05:43,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742166_1342 (size=45609) 2024-12-05T12:05:43,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742166_1342 (size=45609) 2024-12-05T12:05:43,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742166_1342 (size=45609) 2024-12-05T12:05:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742167_1343 (size=136454) 2024-12-05T12:05:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742167_1343 (size=136454) 2024-12-05T12:05:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742167_1343 (size=136454) 2024-12-05T12:05:43,774 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
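[editor's note] The preceding ExportSnapshot(1094/1095/1162) entries describe the second export of snaptb0-testConsecutiveExports from the source HDFS root to a local-filesystem target, and the JobResourceUploader warning directly above is emitted just before that export job is submitted. A hedged sketch of an equivalent standalone invocation via ToolRunner; the option names follow the ExportSnapshot CLI as commonly documented and the target path is hypothetical, so verify both against your HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the named snapshot's manifest and referenced hfiles to the target root,
    // roughly the operation the log records for the local-export directory.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export" // hypothetical target root
    });
    if (rc != 0) {
      throw new RuntimeException("ExportSnapshot failed with exit code " + rc);
    }
  }
}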
2024-12-05T12:05:43,776 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-05T12:05:43,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.9 K 2024-12-05T12:05:43,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.1 K 2024-12-05T12:05:43,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.4 K 2024-12-05T12:05:43,778 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.8 K 2024-12-05T12:05:43,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742168_1344 (size=1023) 2024-12-05T12:05:43,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742168_1344 (size=1023) 2024-12-05T12:05:43,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742168_1344 (size=1023) 2024-12-05T12:05:43,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742169_1345 (size=35) 2024-12-05T12:05:43,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742169_1345 (size=35) 2024-12-05T12:05:43,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742169_1345 (size=35) 2024-12-05T12:05:43,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742170_1346 (size=304039) 2024-12-05T12:05:43,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742170_1346 (size=304039) 2024-12-05T12:05:43,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742170_1346 (size=304039) 2024-12-05T12:05:46,759 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:05:46,759 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:05:46,779 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0005_000001 (auth:SIMPLE) from 127.0.0.1:48396 2024-12-05T12:05:47,718 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:35540 2024-12-05T12:05:51,870 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000001/launch_container.sh] 2024-12-05T12:05:51,870 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000001/container_tokens] 2024-12-05T12:05:51,870 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0005/container_1733400137807_0005_01_000001/sysfs] 2024-12-05T12:05:53,190 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:35434 2024-12-05T12:05:53,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742171_1347 (size=349737) 2024-12-05T12:05:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742171_1347 (size=349737) 2024-12-05T12:05:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742171_1347 (size=349737) 2024-12-05T12:05:55,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:34090 2024-12-05T12:05:55,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:56084 2024-12-05T12:05:56,279 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:56088 2024-12-05T12:05:56,284 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:34094 2024-12-05T12:05:58,754 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:06:00,222 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000002/launch_container.sh] 2024-12-05T12:06:00,222 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000002/container_tokens] 2024-12-05T12:06:00,222 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000002/sysfs] 2024-12-05T12:06:01,842 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000003/launch_container.sh] 2024-12-05T12:06:01,843 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000003/container_tokens] 2024-12-05T12:06:01,843 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000003/sysfs] 2024-12-05T12:06:02,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742172_1348 (size=29751) 2024-12-05T12:06:02,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742172_1348 (size=29751) 2024-12-05T12:06:02,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742172_1348 (size=29751) 2024-12-05T12:06:02,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742173_1349 (size=463) 2024-12-05T12:06:02,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742173_1349 (size=463) 2024-12-05T12:06:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742173_1349 (size=463) 2024-12-05T12:06:02,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37713 is added to blk_1073742174_1350 (size=29751) 2024-12-05T12:06:02,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742174_1350 (size=29751) 2024-12-05T12:06:02,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742174_1350 (size=29751) 2024-12-05T12:06:02,462 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000005/launch_container.sh] 2024-12-05T12:06:02,462 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000005/container_tokens] 2024-12-05T12:06:02,462 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000005/sysfs] 2024-12-05T12:06:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742175_1351 (size=349737) 2024-12-05T12:06:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742175_1351 (size=349737) 2024-12-05T12:06:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742175_1351 (size=349737) 2024-12-05T12:06:02,493 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:39990 2024-12-05T12:06:02,500 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:44376 2024-12-05T12:06:04,099 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9909a1dd17e17384969b8f6599a993d2, had cached 0 bytes from a total of 15257 2024-12-05T12:06:04,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4e3cf411680795f234fe865ede1cac68, had cached 0 bytes from a total of 5498 2024-12-05T12:06:04,186 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:06:04,186 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
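[editor's note] After the export is finalized and verified, the TestExportSnapshot(495/500) entries that follow simply walk the exported snapshot directory and log each file (.snapshotinfo and data.manifest are expected). A small sketch of that kind of recursive listing with the Hadoop FileSystem API, assuming only that the exported snapshot root URI is known; it is an illustration, not the test's own helper:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshotFiles {
  // Recursively lists every file under the exported snapshot root,
  // e.g. a file:/ or hdfs:// .../.hbase-snapshot/<snapshot-name> directory.
  public static void list(String rootUri) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path(rootUri);
    FileSystem fs = root.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}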
2024-12-05T12:06:04,189 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-05T12:06:04,189 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:06:04,189 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:06:04,189 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T12:06:04,190 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T12:06:04,190 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T12:06:04,190 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@684dc976 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-05T12:06:04,191 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-05T12:06:04,191 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400320421/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-05T12:06:04,208 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-05T12:06:04,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T12:06:04,213 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400364212"}]},"ts":"1733400364212"} 2024-12-05T12:06:04,215 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-05T12:06:04,215 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-05T12:06:04,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-05T12:06:04,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, UNASSIGN}] 2024-12-05T12:06:04,219 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, UNASSIGN 2024-12-05T12:06:04,219 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, UNASSIGN 2024-12-05T12:06:04,220 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=9909a1dd17e17384969b8f6599a993d2, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:04,220 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=4e3cf411680795f234fe865ede1cac68, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:04,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, UNASSIGN because future has completed 2024-12-05T12:06:04,222 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:04,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:04,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, UNASSIGN because future has completed 2024-12-05T12:06:04,223 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:04,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:04,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T12:06:04,375 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:06:04,375 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 9909a1dd17e17384969b8f6599a993d2, disabling compactions & flushes 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 4e3cf411680795f234fe865ede1cac68, disabling compactions & flushes 2024-12-05T12:06:04,375 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:06:04,375 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. after waiting 0 ms 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. after waiting 0 ms 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:06:04,375 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
2024-12-05T12:06:04,382 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:04,382 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:04,382 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:04,382 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2. 2024-12-05T12:06:04,382 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 9909a1dd17e17384969b8f6599a993d2: Waiting for close lock at 1733400364375Running coprocessor pre-close hooks at 1733400364375Disabling compacts and flushes for region at 1733400364375Disabling writes for close at 1733400364375Writing region close event to WAL at 1733400364376 (+1 ms)Running coprocessor post-close hooks at 1733400364382 (+6 ms)Closed at 1733400364382 2024-12-05T12:06:04,383 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:04,383 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68. 
2024-12-05T12:06:04,383 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 4e3cf411680795f234fe865ede1cac68: Waiting for close lock at 1733400364375Running coprocessor pre-close hooks at 1733400364375Disabling compacts and flushes for region at 1733400364375Disabling writes for close at 1733400364375Writing region close event to WAL at 1733400364377 (+2 ms)Running coprocessor post-close hooks at 1733400364383 (+6 ms)Closed at 1733400364383 2024-12-05T12:06:04,385 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:06:04,385 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=9909a1dd17e17384969b8f6599a993d2, regionState=CLOSED 2024-12-05T12:06:04,385 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 4e3cf411680795f234fe865ede1cac68 2024-12-05T12:06:04,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=4e3cf411680795f234fe865ede1cac68, regionState=CLOSED 2024-12-05T12:06:04,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:04,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:04,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-12-05T12:06:04,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 9909a1dd17e17384969b8f6599a993d2, server=80666a11a09f,42305,1733400131012 in 167 msec 2024-12-05T12:06:04,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=127 2024-12-05T12:06:04,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 4e3cf411680795f234fe865ede1cac68, server=80666a11a09f,42913,1733400130845 in 167 msec 2024-12-05T12:06:04,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=9909a1dd17e17384969b8f6599a993d2, UNASSIGN in 174 msec 2024-12-05T12:06:04,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-12-05T12:06:04,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=4e3cf411680795f234fe865ede1cac68, UNASSIGN in 176 msec 2024-12-05T12:06:04,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-05T12:06:04,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 181 msec 2024-12-05T12:06:04,401 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400364401"}]},"ts":"1733400364401"} 2024-12-05T12:06:04,403 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-05T12:06:04,403 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-05T12:06:04,412 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 195 msec 2024-12-05T12:06:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-05T12:06:04,528 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T12:06:04,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-05T12:06:04,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,531 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-05T12:06:04,533 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,539 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-05T12:06:04,543 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68 2024-12-05T12:06:04,543 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:06:04,546 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/recovered.edits] 2024-12-05T12:06:04,546 DEBUG [HFileArchiver-14 {}] 
backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/recovered.edits] 2024-12-05T12:06:04,552 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/cf/bb28d51274ff40288ad221ce8750d50d 2024-12-05T12:06:04,553 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/cf/33ccfe4260444d849c076f193d9dcbee 2024-12-05T12:06:04,560 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2/recovered.edits/9.seqid 2024-12-05T12:06:04,560 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:06:04,561 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68/recovered.edits/9.seqid 2024-12-05T12:06:04,561 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testConsecutiveExports/4e3cf411680795f234fe865ede1cac68 2024-12-05T12:06:04,561 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-05T12:06:04,562 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-05T12:06:04,563 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-05T12:06:04,566 
DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241205b49f36e276fc4524990b8a75e25ccd30_9909a1dd17e17384969b8f6599a993d2 2024-12-05T12:06:04,567 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412050bb38da3928f41bfaffece8a8f37846e_4e3cf411680795f234fe865ede1cac68 2024-12-05T12:06:04,568 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-05T12:06:04,570 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,572 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-05T12:06:04,575 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-05T12:06:04,578 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,578 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
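[Annotation] The entries above record the master-side DisableTableProcedure and DeleteTableProcedure for testtb-testConsecutiveExports: regions are closed and unassigned, their HFiles and recovered.edits are moved under archive/, and the table's rows are then removed from hbase:meta. Below is a minimal client-side sketch of the Admin calls that drive this flow, assuming a standard HBase 2.x+ connection; the class name and configuration are illustrative and not taken from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.tableExists(tn)) {
        admin.disableTable(tn);   // server side: DisableTableProcedure (regions CLOSED, state=DISABLED)
        admin.deleteTable(tn);    // server side: DeleteTableProcedure (HFiles archived, meta rows removed)
      }
    }
  }
}
```

The archive step preserves the region and family layout, so data/default/&lt;table&gt;/&lt;region&gt;/cf/&lt;hfile&gt; reappears under archive/data/default/&lt;table&gt;/&lt;region&gt;/cf/&lt;hfile&gt;, which is exactly the move the HFileArchiver lines above record.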
2024-12-05T12:06:04,579 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400364579"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:04,579 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400364579"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:04,582 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:06:04,582 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4e3cf411680795f234fe865ede1cac68, NAME => 'testtb-testConsecutiveExports,,1733400318721.4e3cf411680795f234fe865ede1cac68.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9909a1dd17e17384969b8f6599a993d2, NAME => 'testtb-testConsecutiveExports,1,1733400318721.9909a1dd17e17384969b8f6599a993d2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:06:04,582 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-05T12:06:04,583 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400364582"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:04,586 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-05T12:06:04,587 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-05T12:06:04,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 58 msec 2024-12-05T12:06:04,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,612 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T12:06:04,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-12-05T12:06:04,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T12:06:04,613 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-05T12:06:04,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:04,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:04,620 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,620 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:04,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-05T12:06:04,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-05T12:06:04,622 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-05T12:06:04,622 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-05T12:06:04,632 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-05T12:06:04,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-05T12:06:04,636 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-05T12:06:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-05T12:06:04,664 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=809 (was 809), OpenFileDescriptor=803 (was 822), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=872 (was 866) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2748 (was 3313) 2024-12-05T12:06:04,664 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-05T12:06:04,685 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=809, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=872, ProcessCount=17, AvailableMemoryMB=2747 2024-12-05T12:06:04,685 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-05T12:06:04,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:04,689 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:04,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-05T12:06:04,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T12:06:04,690 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742176_1352 (size=458) 2024-12-05T12:06:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742176_1352 (size=458) 2024-12-05T12:06:04,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37713 is added to blk_1073742176_1352 (size=458) 2024-12-05T12:06:04,712 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 52564b4c12a2de511877a196719da4fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:04,714 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cac532a447333068e6a2db6ad5aa6d3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742177_1353 (size=83) 2024-12-05T12:06:04,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742177_1353 (size=83) 2024-12-05T12:06:04,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742177_1353 (size=83) 2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 52564b4c12a2de511877a196719da4fe, disabling compactions & flushes 2024-12-05T12:06:04,735 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 
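[Annotation] The CreateTableProcedure above prints the full descriptor for testtb-testExportFileSystemStateWithMergeRegion: a single MOB-enabled family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at row key '1'. A hedged sketch of building an equivalent descriptor with the HBase 2.x+ builder API follows; it is illustrative only, the test harness constructs its own descriptor.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTableExample {
  static void createMobTable(Admin admin) throws java.io.IOException {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)      // IS_MOB => 'true'
        .setMobThreshold(0L)      // MOB_THRESHOLD => '0': every cell value goes to a MOB file
        .setMaxVersions(1)        // VERSIONS => '1'
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();
    byte[][] splitKeys = { Bytes.toBytes("1") };   // yields two regions: ['', '1') and ['1', '')
    admin.createTable(td, splitKeys);              // server side: a CreateTableProcedure like pid=132 above
  }
}
```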
2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. after waiting 0 ms 2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:04,735 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:04,735 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 52564b4c12a2de511877a196719da4fe: Waiting for close lock at 1733400364735Disabling compacts and flushes for region at 1733400364735Disabling writes for close at 1733400364735Writing region close event to WAL at 1733400364735Closed at 1733400364735 2024-12-05T12:06:04,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742178_1354 (size=83) 2024-12-05T12:06:04,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742178_1354 (size=83) 2024-12-05T12:06:04,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742178_1354 (size=83) 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing cac532a447333068e6a2db6ad5aa6d3d, disabling compactions & flushes 2024-12-05T12:06:04,741 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
after waiting 0 ms 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:04,741 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:04,741 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for cac532a447333068e6a2db6ad5aa6d3d: Waiting for close lock at 1733400364741Disabling compacts and flushes for region at 1733400364741Disabling writes for close at 1733400364741Writing region close event to WAL at 1733400364741Closed at 1733400364741 2024-12-05T12:06:04,743 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:04,743 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733400364743"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400364743"}]},"ts":"1733400364743"} 2024-12-05T12:06:04,743 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733400364743"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400364743"}]},"ts":"1733400364743"} 2024-12-05T12:06:04,746 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
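[Annotation] Once the two regions have been added to hbase:meta, any client can enumerate them without going through the master; the "Found 2 regions" check later in this log is essentially that lookup. A minimal sketch using the standard RegionLocator API (illustrative; the test uses its own utility for this):

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class ListRegionsExample {
  static void printRegions(Connection conn, String table) throws java.io.IOException {
    TableName tn = TableName.valueOf(table);
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();  // reads region rows from hbase:meta
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```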
2024-12-05T12:06:04,753 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:04,753 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400364753"}]},"ts":"1733400364753"} 2024-12-05T12:06:04,755 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-05T12:06:04,756 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:04,757 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:04,757 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:04,757 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:04,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:04,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, ASSIGN}] 2024-12-05T12:06:04,761 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, ASSIGN 2024-12-05T12:06:04,762 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, ASSIGN 2024-12-05T12:06:04,767 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 
2024-12-05T12:06:04,767 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:04,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T12:06:04,918 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:06:04,918 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=cac532a447333068e6a2db6ad5aa6d3d, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:04,919 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=52564b4c12a2de511877a196719da4fe, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:04,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, ASSIGN because future has completed 2024-12-05T12:06:04,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:04,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, ASSIGN because future has completed 2024-12-05T12:06:04,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:05,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T12:06:05,078 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:05,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => cac532a447333068e6a2db6ad5aa6d3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:06:05,079 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
service=AccessControlService 2024-12-05T12:06:05,079 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:05,079 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,079 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:05,079 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,079 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,081 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:05,081 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 52564b4c12a2de511877a196719da4fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:06:05,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. service=AccessControlService 2024-12-05T12:06:05,082 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
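[Annotation] Each region open above registers the AccessControlService coprocessor endpoint and loads org.apache.hadoop.hbase.security.access.AccessController, which is what makes this a "secure" export test. The coprocessor is normally enabled through configuration; the snippet below is a sketch of the usual properties, not a copy of how this mini-cluster is wired.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class AclCoprocessorConfigExample {
  static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Standard way to enable the ACL coprocessor on master, region, and region server roles.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}
```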
2024-12-05T12:06:05,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:05,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,082 INFO [StoreOpener-cac532a447333068e6a2db6ad5aa6d3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,084 INFO [StoreOpener-52564b4c12a2de511877a196719da4fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,084 INFO [StoreOpener-cac532a447333068e6a2db6ad5aa6d3d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cac532a447333068e6a2db6ad5aa6d3d columnFamilyName cf 2024-12-05T12:06:05,085 DEBUG [StoreOpener-cac532a447333068e6a2db6ad5aa6d3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:05,086 INFO [StoreOpener-52564b4c12a2de511877a196719da4fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52564b4c12a2de511877a196719da4fe columnFamilyName cf 2024-12-05T12:06:05,086 INFO [StoreOpener-cac532a447333068e6a2db6ad5aa6d3d-1 {}] regionserver.HStore(327): Store=cac532a447333068e6a2db6ad5aa6d3d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:05,086 DEBUG [StoreOpener-52564b4c12a2de511877a196719da4fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:05,086 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,087 INFO [StoreOpener-52564b4c12a2de511877a196719da4fe-1 {}] regionserver.HStore(327): Store=52564b4c12a2de511877a196719da4fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:05,087 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,087 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,087 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,088 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): 
Cleaning up temporary data for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,089 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,090 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,092 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:05,092 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened cac532a447333068e6a2db6ad5aa6d3d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64910303, jitterRate=-0.0327611118555069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:05,092 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for cac532a447333068e6a2db6ad5aa6d3d: Running coprocessor pre-open hook at 1733400365079Writing region info on filesystem at 1733400365080 (+1 ms)Initializing all the Stores at 1733400365082 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400365082Cleaning up temporary data from old regions at 1733400365088 (+6 ms)Running coprocessor post-open hooks at 1733400365093 (+5 ms)Region opened successfully at 1733400365093 2024-12-05T12:06:05,094 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d., pid=135, masterSystemTime=1733400365075 2024-12-05T12:06:05,096 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:05,096 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
2024-12-05T12:06:05,097 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=cac532a447333068e6a2db6ad5aa6d3d, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:05,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:05,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-12-05T12:06:05,103 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012 in 178 msec 2024-12-05T12:06:05,103 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:05,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, ASSIGN in 345 msec 2024-12-05T12:06:05,104 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 52564b4c12a2de511877a196719da4fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67644884, jitterRate=0.00798732042312622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:05,104 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,104 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 52564b4c12a2de511877a196719da4fe: Running coprocessor pre-open hook at 1733400365082Writing region info on filesystem at 1733400365082Initializing all the Stores at 1733400365083 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400365083Cleaning up temporary data from old regions at 1733400365088 (+5 ms)Running coprocessor post-open hooks at 1733400365104 (+16 ms)Region opened successfully at 1733400365104 2024-12-05T12:06:05,105 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe., pid=136, masterSystemTime=1733400365076 2024-12-05T12:06:05,106 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:05,106 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:05,107 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=52564b4c12a2de511877a196719da4fe, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:05,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:05,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-12-05T12:06:05,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845 in 186 msec 2024-12-05T12:06:05,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-05T12:06:05,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, ASSIGN in 354 msec 2024-12-05T12:06:05,116 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:05,116 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400365116"}]},"ts":"1733400365116"} 2024-12-05T12:06:05,117 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-05T12:06:05,118 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:05,118 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-05T12:06:05,121 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T12:06:05,162 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:05,162 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:05,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:05,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:05,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:05,170 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:05,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:05,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:05,172 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 484 msec 2024-12-05T12:06:05,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-05T12:06:05,317 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T12:06:05,318 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:05,320 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,320 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
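[Annotation] The PermissionStorage and ZKPermissionWatcher entries above show the table owner's permission ("jenkins: RWXCA") being written to hbase:acl and fanned out to every region server through the /hbase/acl znode. Granting such a permission from a client normally goes through AccessControlClient; a hedged sketch is below (the user name and actions mirror the log, the call itself is illustrative).

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

final class GrantExample {
  static void grantAll(Connection conn, String user, String table) throws Throwable {
    // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN, as logged by PermissionStorage above.
    // family and qualifier are null for a table-wide grant.
    AccessControlClient.grant(conn, TableName.valueOf(table), user, null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```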
2024-12-05T12:06:05,320 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:05,322 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:05,327 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:05,332 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:05,335 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T12:06:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400365335 (current time:1733400365335). 2024-12-05T12:06:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-05T12:06:05,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:05,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@710e8eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:05,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:05,337 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:05,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:05,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:05,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d11e525, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:05,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:05,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,339 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56598, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:05,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70d69c85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:05,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:05,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:05,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:05,343 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,343 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c296eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:05,345 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:05,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:05,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:05,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ba19326, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:05,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:05,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,347 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:05,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48f55b3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:05,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:05,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:05,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:05,350 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:05,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:05,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:05,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:05,354 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:05,354 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:05,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T12:06:05,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
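The block above shows MasterRpcServices receiving a FLUSH-type snapshot request for emptySnaptb0-testExportFileSystemStateWithMergeRegion, SnapshotDescriptionUtils filling in creation time, TTL, version and owner, and PermissionStorage reading the table ACL before the procedure is registered. A minimal client-side sketch of the call that drives this flow, assuming the standard synchronous Admin API (the test itself goes through RawAsyncHBaseAdmin, as the earlier client entries show):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the target cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          // A FLUSH-type snapshot, matching "type=FLUSH" in the request logged above.
          // Admin.snapshot blocks until the master-side SnapshotProcedure completes.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
              table, SnapshotType.FLUSH);
        }
      }
    }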
2024-12-05T12:06:05,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T12:06:05,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-05T12:06:05,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T12:06:05,358 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:05,359 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:05,361 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:05,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742179_1355 (size=215) 2024-12-05T12:06:05,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742179_1355 (size=215) 2024-12-05T12:06:05,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742179_1355 (size=215) 2024-12-05T12:06:05,370 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:05,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe}] 2024-12-05T12:06:05,372 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,372 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T12:06:05,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-05T12:06:05,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-05T12:06:05,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:05,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:05,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for cac532a447333068e6a2db6ad5aa6d3d: 2024-12-05T12:06:05,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T12:06:05,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 52564b4c12a2de511877a196719da4fe: 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:05,525 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:05,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742181_1357 (size=86) 2024-12-05T12:06:05,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742181_1357 (size=86) 2024-12-05T12:06:05,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742181_1357 (size=86) 2024-12-05T12:06:05,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:05,557 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-05T12:06:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-05T12:06:05,558 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,558 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:05,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe in 190 msec 2024-12-05T12:06:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742180_1356 (size=86) 2024-12-05T12:06:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742180_1356 (size=86) 2024-12-05T12:06:05,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742180_1356 (size=86) 2024-12-05T12:06:05,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
2024-12-05T12:06:05,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-05T12:06:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-05T12:06:05,569 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,570 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:05,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-05T12:06:05,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d in 213 msec 2024-12-05T12:06:05,585 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:05,587 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:05,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:05,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:05,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:05,593 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:06:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742182_1358 (size=78) 2024-12-05T12:06:05,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742182_1358 (size=78) 2024-12-05T12:06:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742182_1358 (size=78) 2024-12-05T12:06:05,611 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:05,611 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,612 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742183_1359 (size=713) 2024-12-05T12:06:05,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742183_1359 (size=713) 2024-12-05T12:06:05,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742183_1359 (size=713) 2024-12-05T12:06:05,665 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:05,676 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:05,676 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:05,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T12:06:05,678 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:05,678 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-05T12:06:05,680 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 323 msec 2024-12-05T12:06:05,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-05T12:06:05,988 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T12:06:05,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:05,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:05,997 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:06,004 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,004 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
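The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what HBase emits when mutations arrive with durability SKIP_WAL. A minimal sketch of a put that triggers that warning, assuming the standard client API; the row key and value are placeholders, and only the family 'cf' and qualifier 'q' come from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
          // Hypothetical row and value; the real test generates its own fixture data.
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          // SKIP_WAL is what produces the "with WAL disabled" warning logged above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }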
2024-12-05T12:06:06,004 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:06,006 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:06,011 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:06,019 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-05T12:06:06,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T12:06:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400366023 (current time:1733400366023). 2024-12-05T12:06:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-05T12:06:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a19334c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:06,025 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:06,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:06,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:06,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@492fbdd5, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:06,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:06,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,027 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:06,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8c8a6d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:06,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:06,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:06,030 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52882, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:06,031 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:06,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,034 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee8ba43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:06,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:06,044 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:06,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:06,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:06,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51578bc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:06,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:06,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,046 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56668, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:06,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72a6300, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:06,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:06,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:06,049 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:06,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:06,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:06,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:06,055 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-05T12:06:06,056 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:06,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
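At this point the master has validated the snaptb0-testExportFileSystemStateWithMergeRegion description and read the table ACL; the entries that follow register SnapshotProcedure pid=140 while the client keeps polling "Checking to see if procedure is done". Once such a procedure reaches SUCCESS (as pid=137 did above), the result can be confirmed from the client side by listing snapshots; a minimal sketch, assuming the HBase 2.x+ Admin.listSnapshots() API:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class SnapshotListSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // List completed snapshots and check that the one registered above is present.
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          boolean found = snapshots.stream()
              .anyMatch(s -> s.getName().equals("snaptb0-testExportFileSystemStateWithMergeRegion"));
          System.out.println("snapshot present: " + found);
        }
      }
    }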
2024-12-05T12:06:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-05T12:06:06,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-05T12:06:06,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T12:06:06,059 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:06,060 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:06,063 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:06,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742184_1360 (size=210) 2024-12-05T12:06:06,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742184_1360 (size=210) 2024-12-05T12:06:06,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742184_1360 (size=210) 2024-12-05T12:06:06,083 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:06,084 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe}] 2024-12-05T12:06:06,085 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:06,085 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:06,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T12:06:06,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-05T12:06:06,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-05T12:06:06,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:06,237 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 52564b4c12a2de511877a196719da4fe 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T12:06:06,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:06,238 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing cac532a447333068e6a2db6ad5aa6d3d 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T12:06:06,263 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d is 71, key is 0d80b6554916138662e1ea66827bae7c/cf:q/1733400365994/Put/seqid=0 2024-12-05T12:06:06,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe is 71, key is 10d84dc34b15af3b937e3ca995e97197/cf:q/1733400365995/Put/seqid=0 2024-12-05T12:06:06,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742185_1361 (size=8241) 2024-12-05T12:06:06,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742185_1361 (size=8241) 2024-12-05T12:06:06,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742185_1361 (size=8241) 2024-12-05T12:06:06,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:06,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:06,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/.tmp/cf/a3ca7ef33d46498e9c8e771f307e3fe5, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=52564b4c12a2de511877a196719da4fe] 2024-12-05T12:06:06,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/.tmp/cf/a3ca7ef33d46498e9c8e771f307e3fe5 is 224, key is 1af9d9fe14d175f134df22a9b50be9fd7/cf:q/1733400365995/Put/seqid=0 2024-12-05T12:06:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742186_1362 (size=5032) 2024-12-05T12:06:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742186_1362 (size=5032) 2024-12-05T12:06:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742186_1362 (size=5032) 2024-12-05T12:06:06,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:06,324 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:06,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/.tmp/cf/17a00bfbf5b7419f9fc36d0ed7657846, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=cac532a447333068e6a2db6ad5aa6d3d] 2024-12-05T12:06:06,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/.tmp/cf/17a00bfbf5b7419f9fc36d0ed7657846 is 224, key is 0c828977cce4efa137e5b6a0c6d99cbe7/cf:q/1733400365994/Put/seqid=0 2024-12-05T12:06:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742187_1363 (size=15937) 2024-12-05T12:06:06,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742187_1363 (size=15937) 2024-12-05T12:06:06,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742187_1363 (size=15937) 2024-12-05T12:06:06,344 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/.tmp/cf/a3ca7ef33d46498e9c8e771f307e3fe5 2024-12-05T12:06:06,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/.tmp/cf/a3ca7ef33d46498e9c8e771f307e3fe5 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5 2024-12-05T12:06:06,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742188_1364 (size=5754) 2024-12-05T12:06:06,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742188_1364 (size=5754) 2024-12-05T12:06:06,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742188_1364 (size=5754) 2024-12-05T12:06:06,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/.tmp/cf/17a00bfbf5b7419f9fc36d0ed7657846 2024-12-05T12:06:06,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5, entries=48, sequenceid=6, filesize=15.6 K 2024-12-05T12:06:06,361 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 52564b4c12a2de511877a196719da4fe in 124ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:06,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 52564b4c12a2de511877a196719da4fe: 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5] hfiles 2024-12-05T12:06:06,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/.tmp/cf/17a00bfbf5b7419f9fc36d0ed7657846 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846 2024-12-05T12:06:06,368 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846, entries=2, sequenceid=6, filesize=5.6 K 2024-12-05T12:06:06,368 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for cac532a447333068e6a2db6ad5aa6d3d in 130ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for cac532a447333068e6a2db6ad5aa6d3d: 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846] hfiles 2024-12-05T12:06:06,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742189_1365 (size=125) 2024-12-05T12:06:06,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742189_1365 (size=125) 2024-12-05T12:06:06,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742189_1365 (size=125) 2024-12-05T12:06:06,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 
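The two "Finished flush of dataSize ..." entries above show each region's memstore being written out to an HFile before its snapshot references are taken; because the snapshot is of type FLUSH, the flush is driven by the SnapshotRegionProcedure itself rather than by an explicit client call. For comparison, a minimal client-side flush of the same table would look roughly like the sketch below (class name and connection setup are illustrative; Admin.flush is the standard HBase client call).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush every region of the table; the resulting
      // HFiles are what the log reports as "Added ... entries=.., sequenceid=.., filesize=..".
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}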
2024-12-05T12:06:06,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-05T12:06:06,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-05T12:06:06,376 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:06,376 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:06,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T12:06:06,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 52564b4c12a2de511877a196719da4fe in 293 msec 2024-12-05T12:06:06,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742190_1366 (size=125) 2024-12-05T12:06:06,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742190_1366 (size=125) 2024-12-05T12:06:06,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742190_1366 (size=125) 2024-12-05T12:06:06,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
2024-12-05T12:06:06,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-05T12:06:06,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-05T12:06:06,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:06,387 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:06,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-12-05T12:06:06,389 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:06,389 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cac532a447333068e6a2db6ad5aa6d3d in 304 msec 2024-12-05T12:06:06,390 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:06,391 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
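At this point both SnapshotRegionProcedure children (pid=141 and pid=142) have reported back to the master, and the parent SnapshotProcedure (pid=140) moves on to the MOB region and, a few entries below, to manifest consolidation, verification and completion. All of this is the server side of a single flush-type table snapshot request; a client-side sketch that would produce this sequence is shown below (connection setup is illustrative; Admin.snapshot and SnapshotDescription are the standard client API).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot: the master flushes each region and then records
      // file references, matching the "type=FLUSH" SnapshotProcedure in the log.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH));
    }
  }
}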
2024-12-05T12:06:06,391 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:06,391 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:06,392 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d] hfiles 2024-12-05T12:06:06,392 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:06,392 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:06,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742191_1367 (size=309) 2024-12-05T12:06:06,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742191_1367 (size=309) 2024-12-05T12:06:06,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742191_1367 (size=309) 2024-12-05T12:06:06,404 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:06,404 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,405 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742192_1368 (size=1023) 2024-12-05T12:06:06,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37713 is added to blk_1073742192_1368 (size=1023) 2024-12-05T12:06:06,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742192_1368 (size=1023) 2024-12-05T12:06:06,432 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:06,441 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:06,441 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:06,443 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:06,443 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-05T12:06:06,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 387 msec 2024-12-05T12:06:06,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-05T12:06:06,688 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T12:06:06,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:06:06,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52896, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:06:06,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-12-05T12:06:06,693 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:06,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:06,696 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:06,696 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:06,696 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-05T12:06:06,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T12:06:06,697 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:06,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742193_1369 (size=399) 2024-12-05T12:06:06,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742193_1369 (size=399) 2024-12-05T12:06:06,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742193_1369 (size=399) 2024-12-05T12:06:06,719 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3389d73927f32bc82a09c7249b8729b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:06,719 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] 
regionserver.HRegion(7572): creating {ENCODED => ec786c79ff70ab9414a05ecd6c343e10, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:06,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742194_1370 (size=85) 2024-12-05T12:06:06,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742194_1370 (size=85) 2024-12-05T12:06:06,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742194_1370 (size=85) 2024-12-05T12:06:06,747 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:06,747 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing ec786c79ff70ab9414a05ecd6c343e10, disabling compactions & flushes 2024-12-05T12:06:06,747 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:06,747 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:06,747 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. after waiting 0 ms 2024-12-05T12:06:06,748 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:06,748 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 
2024-12-05T12:06:06,748 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for ec786c79ff70ab9414a05ecd6c343e10: Waiting for close lock at 1733400366747Disabling compacts and flushes for region at 1733400366747Disabling writes for close at 1733400366747Writing region close event to WAL at 1733400366748 (+1 ms)Closed at 1733400366748 2024-12-05T12:06:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742195_1371 (size=85) 2024-12-05T12:06:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742195_1371 (size=85) 2024-12-05T12:06:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742195_1371 (size=85) 2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 3389d73927f32bc82a09c7249b8729b4, disabling compactions & flushes 2024-12-05T12:06:06,757 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. after waiting 0 ms 2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:06,757 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 
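The CreateTableProcedure (pid=143) above writes the filesystem layout for the two regions of testtb-testExportFileSystemStateWithMergeRegion-1 (start keys '' and '2') and briefly instantiates and closes each region before registering them in hbase:meta. A client-side sketch of a create request matching the descriptor logged by HMaster$4(2454) follows; only the attributes visible in the log are set, everything else is left at its default, and the class name is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => '65536'
              .build());
      // A single split key "2" yields the two regions seen in the log: ('' -> '2') and ('2' -> '').
      byte[][] splitKeys = { Bytes.toBytes("2") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}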
2024-12-05T12:06:06,757 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3389d73927f32bc82a09c7249b8729b4: Waiting for close lock at 1733400366757Disabling compacts and flushes for region at 1733400366757Disabling writes for close at 1733400366757Writing region close event to WAL at 1733400366757Closed at 1733400366757 2024-12-05T12:06:06,758 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:06,758 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733400366758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400366758"}]},"ts":"1733400366758"} 2024-12-05T12:06:06,758 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733400366758"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400366758"}]},"ts":"1733400366758"} 2024-12-05T12:06:06,761 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-05T12:06:06,761 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:06,762 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400366761"}]},"ts":"1733400366761"} 2024-12-05T12:06:06,763 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-05T12:06:06,763 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:06,764 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:06,764 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:06,764 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:06,764 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:06,765 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, ASSIGN}] 2024-12-05T12:06:06,766 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, ASSIGN 2024-12-05T12:06:06,766 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, ASSIGN 2024-12-05T12:06:06,767 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:06,767 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:06,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T12:06:06,918 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
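The two ASSIGN subprocedures (pid=144 and pid=145) above go through the balancer, which reports "Reassigned 2 regions" and picks a region server for each region. Once the regions are open, a client can confirm where they landed; a minimal sketch using the standard RegionLocator API follows (the printed format is illustrative).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
      // One line per region: region name plus the server chosen by the balancer
      // (compare with the OPENING/OPEN updates written to hbase:meta below).
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}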
2024-12-05T12:06:06,918 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=3389d73927f32bc82a09c7249b8729b4, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:06,918 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=ec786c79ff70ab9414a05ecd6c343e10, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:06,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, ASSIGN because future has completed 2024-12-05T12:06:06,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3389d73927f32bc82a09c7249b8729b4, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:06,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, ASSIGN because future has completed 2024-12-05T12:06:06,921 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:07,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T12:06:07,074 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:07,074 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 3389d73927f32bc82a09c7249b8729b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.', STARTKEY => '', ENDKEY => '2'} 2024-12-05T12:06:07,074 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,074 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 
service=AccessControlService 2024-12-05T12:06:07,074 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => ec786c79ff70ab9414a05ecd6c343e10, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.', STARTKEY => '2', ENDKEY => ''} 2024-12-05T12:06:07,074 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:07,074 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. service=AccessControlService 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,075 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
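The AccessController coprocessor is registered on each opening region here, and a few entries below the master's post-create hook writes an RWXCA entry for user jenkins into the acl table, which ZKPermissionWatcher then propagates to every region server via the /hbase/acl znode. In this test the grant comes from the create-table post operation; an equivalent explicit grant issued from a client would look roughly like the sketch below (class name is illustrative; AccessControlClient.grant is the standard client call).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant R/W/X/C/A on the whole table (family and qualifier left null),
      // i.e. the "jenkins: RWXCA" entry that PermissionStorage logs.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}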
2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,075 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,076 INFO [StoreOpener-3389d73927f32bc82a09c7249b8729b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,076 INFO [StoreOpener-ec786c79ff70ab9414a05ecd6c343e10-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,077 INFO [StoreOpener-3389d73927f32bc82a09c7249b8729b4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3389d73927f32bc82a09c7249b8729b4 columnFamilyName cf 2024-12-05T12:06:07,077 INFO [StoreOpener-ec786c79ff70ab9414a05ecd6c343e10-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec786c79ff70ab9414a05ecd6c343e10 columnFamilyName cf 2024-12-05T12:06:07,077 DEBUG [StoreOpener-3389d73927f32bc82a09c7249b8729b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,077 DEBUG [StoreOpener-ec786c79ff70ab9414a05ecd6c343e10-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,077 INFO [StoreOpener-3389d73927f32bc82a09c7249b8729b4-1 {}] regionserver.HStore(327): Store=3389d73927f32bc82a09c7249b8729b4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:07,077 INFO [StoreOpener-ec786c79ff70ab9414a05ecd6c343e10-1 {}] regionserver.HStore(327): Store=ec786c79ff70ab9414a05ecd6c343e10/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:07,077 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,077 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,078 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for ec786c79ff70ab9414a05ecd6c343e10 
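The CompactionConfiguration(183) entries above dump the effective compaction settings for the new stores: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and a major compaction period of 604800000 ms (7 days) with 0.5 jitter. These values correspond to standard HBase configuration keys; the sketch below shows the mapping by setting each key to the value printed in the log (these are the defaults here, set only for illustration).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
  }
}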
2024-12-05T12:06:07,080 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,080 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,081 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:07,081 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:07,081 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened ec786c79ff70ab9414a05ecd6c343e10; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71677976, jitterRate=0.06808507442474365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:07,081 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 3389d73927f32bc82a09c7249b8729b4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75240421, jitterRate=0.12116964161396027}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:07,081 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,081 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 3389d73927f32bc82a09c7249b8729b4: Running coprocessor pre-open hook at 1733400367075Writing region info on filesystem at 1733400367075Initializing all the Stores at 1733400367075Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400367075Cleaning up temporary data from old regions at 1733400367078 (+3 ms)Running coprocessor post-open hooks at 1733400367081 (+3 ms)Region opened successfully at 1733400367082 (+1 ms) 2024-12-05T12:06:07,082 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for ec786c79ff70ab9414a05ecd6c343e10: Running 
coprocessor pre-open hook at 1733400367075Writing region info on filesystem at 1733400367075Initializing all the Stores at 1733400367075Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400367076 (+1 ms)Cleaning up temporary data from old regions at 1733400367078 (+2 ms)Running coprocessor post-open hooks at 1733400367081 (+3 ms)Region opened successfully at 1733400367082 (+1 ms) 2024-12-05T12:06:07,083 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10., pid=147, masterSystemTime=1733400367072 2024-12-05T12:06:07,083 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4., pid=146, masterSystemTime=1733400367071 2024-12-05T12:06:07,084 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,084 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,085 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=ec786c79ff70ab9414a05ecd6c343e10, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:07,085 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:07,085 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 
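With both regions of testtb-testExportFileSystemStateWithMergeRegion-1 now open, the entries that follow show the client locating them and then asking the master to merge them (HMaster$2(2278), MergeTableRegionsProcedure pid=148, force=true). A client-side sketch of that kind of merge request is shown below, assuming the multi-region mergeRegionsAsync overload of the HBase 2.x+ Admin API; picking the regions by list index is purely illustrative.

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      List<RegionInfo> regions = admin.getRegions(table);
      byte[][] toMerge = {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // force=true allows merging regions that are not adjacent in key space;
      // the two regions here are adjacent anyway, matching "force=true" in the log.
      admin.mergeRegionsAsync(toMerge, true).get(); // wait for the MergeTableRegionsProcedure
    }
  }
}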
2024-12-05T12:06:07,086 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=3389d73927f32bc82a09c7249b8729b4, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:07,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3389d73927f32bc82a09c7249b8729b4, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:07,088 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-05T12:06:07,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-12-05T12:06:07,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845 in 166 msec 2024-12-05T12:06:07,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-12-05T12:06:07,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 3389d73927f32bc82a09c7249b8729b4, server=80666a11a09f,42305,1733400131012 in 168 msec 2024-12-05T12:06:07,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, ASSIGN in 324 msec 2024-12-05T12:06:07,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-05T12:06:07,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, ASSIGN in 325 msec 2024-12-05T12:06:07,091 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:07,092 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400367091"}]},"ts":"1733400367091"} 2024-12-05T12:06:07,093 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-05T12:06:07,094 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:07,094 DEBUG [PEWorker-5 
{}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-05T12:06:07,096 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-05T12:06:07,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:07,111 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:07,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:07,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:07,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:07,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 440 msec 2024-12-05T12:06:07,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-05T12:06:07,328 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T12:06:07,330 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:07,334 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10., hostname=80666a11a09f,42913,1733400130845, seqNum=2] 2024-12-05T12:06:07,336 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-05T12:06:07,347 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10] 2024-12-05T12:06:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10], force=true 2024-12-05T12:06:07,352 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10], force=true 2024-12-05T12:06:07,352 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10], force=true 2024-12-05T12:06:07,352 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, 
hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10], force=true 2024-12-05T12:06:07,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T12:06:07,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, UNASSIGN}] 2024-12-05T12:06:07,360 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, UNASSIGN 2024-12-05T12:06:07,360 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, UNASSIGN 2024-12-05T12:06:07,361 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=ec786c79ff70ab9414a05ecd6c343e10, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:07,361 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=3389d73927f32bc82a09c7249b8729b4, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:07,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, UNASSIGN because future has completed 2024-12-05T12:06:07,363 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:07,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:07,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, UNASSIGN because future has completed 2024-12-05T12:06:07,363 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:07,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3389d73927f32bc82a09c7249b8729b4, 
server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T12:06:07,515 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,515 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T12:06:07,515 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing ec786c79ff70ab9414a05ecd6c343e10, disabling compactions & flushes 2024-12-05T12:06:07,515 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,515 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,515 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. after waiting 0 ms 2024-12-05T12:06:07,515 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,515 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing ec786c79ff70ab9414a05ecd6c343e10 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-05T12:06:07,516 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,516 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-05T12:06:07,516 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 3389d73927f32bc82a09c7249b8729b4, disabling compactions & flushes 2024-12-05T12:06:07,516 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:07,516 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 
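For context on the merge recorded above (the master logs the client request "merge regions [3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10]" and then unassigns and closes both parent regions): a minimal sketch, assuming the standard HBase 2.x client API, of how such a merge is typically requested. Only the table name is taken from the log; the class and variable names are illustrative and not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import java.util.List;

public class MergeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      // Assumes the table currently has (at least) two regions, as in the log above.
      List<RegionInfo> regions = admin.getRegions(table);
      byte[][] toMerge = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes() };
      // forcible=true corresponds to the force=true attribute shown on the
      // MergeTableRegionsProcedure entries above.
      admin.mergeRegionsAsync(toMerge, true).get();
    }
  }
}
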
2024-12-05T12:06:07,516 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. after waiting 0 ms 2024-12-05T12:06:07,516 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:07,516 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 3389d73927f32bc82a09c7249b8729b4 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-05T12:06:07,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000004/launch_container.sh] 2024-12-05T12:06:07,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000004/container_tokens] 2024-12-05T12:06:07,520 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000004/sysfs] 2024-12-05T12:06:07,536 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/.tmp/cf/8d310a29f11f468ca95f2da88d1c5019 is 28, key is 1/cf:/1733400367331/Put/seqid=0 2024-12-05T12:06:07,536 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/.tmp/cf/8792523f1d144fdcb7d36595171cd14d is 28, key is 2/cf:/1733400367335/Put/seqid=0 2024-12-05T12:06:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742197_1373 (size=4945) 2024-12-05T12:06:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742197_1373 (size=4945) 2024-12-05T12:06:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742196_1372 (size=4945) 2024-12-05T12:06:07,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742197_1373 (size=4945) 2024-12-05T12:06:07,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742196_1372 (size=4945) 2024-12-05T12:06:07,541 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/.tmp/cf/8792523f1d144fdcb7d36595171cd14d 2024-12-05T12:06:07,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742196_1372 (size=4945) 2024-12-05T12:06:07,541 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/.tmp/cf/8d310a29f11f468ca95f2da88d1c5019 2024-12-05T12:06:07,545 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/.tmp/cf/8792523f1d144fdcb7d36595171cd14d as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d 2024-12-05T12:06:07,545 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/.tmp/cf/8d310a29f11f468ca95f2da88d1c5019 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019 2024-12-05T12:06:07,549 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019, entries=1, sequenceid=5, filesize=4.8 K 2024-12-05T12:06:07,549 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d, entries=1, sequenceid=5, filesize=4.8 K 2024-12-05T12:06:07,550 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize 
~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3389d73927f32bc82a09c7249b8729b4 in 34ms, sequenceid=5, compaction requested=false 2024-12-05T12:06:07,550 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ec786c79ff70ab9414a05ecd6c343e10 in 35ms, sequenceid=5, compaction requested=false 2024-12-05T12:06:07,563 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:06:07,563 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:06:07,563 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:07,563 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 2024-12-05T12:06:07,564 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 3389d73927f32bc82a09c7249b8729b4: Waiting for close lock at 1733400367516Running coprocessor pre-close hooks at 1733400367516Disabling compacts and flushes for region at 1733400367516Disabling writes for close at 1733400367516Obtaining lock to block concurrent updates at 1733400367516Preparing flush snapshotting stores in 3389d73927f32bc82a09c7249b8729b4 at 1733400367516Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733400367516Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4. 
at 1733400367517 (+1 ms)Flushing 3389d73927f32bc82a09c7249b8729b4/cf: creating writer at 1733400367517Flushing 3389d73927f32bc82a09c7249b8729b4/cf: appending metadata at 1733400367535 (+18 ms)Flushing 3389d73927f32bc82a09c7249b8729b4/cf: closing flushed file at 1733400367535Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48552f94: reopening flushed file at 1733400367545 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 3389d73927f32bc82a09c7249b8729b4 in 34ms, sequenceid=5, compaction requested=false at 1733400367550 (+5 ms)Writing region close event to WAL at 1733400367557 (+7 ms)Running coprocessor post-close hooks at 1733400367563 (+6 ms)Closed at 1733400367563 2024-12-05T12:06:07,564 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:07,564 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 2024-12-05T12:06:07,564 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for ec786c79ff70ab9414a05ecd6c343e10: Waiting for close lock at 1733400367515Running coprocessor pre-close hooks at 1733400367515Disabling compacts and flushes for region at 1733400367515Disabling writes for close at 1733400367515Obtaining lock to block concurrent updates at 1733400367516 (+1 ms)Preparing flush snapshotting stores in ec786c79ff70ab9414a05ecd6c343e10 at 1733400367516Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733400367516Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10. 
at 1733400367516Flushing ec786c79ff70ab9414a05ecd6c343e10/cf: creating writer at 1733400367517 (+1 ms)Flushing ec786c79ff70ab9414a05ecd6c343e10/cf: appending metadata at 1733400367535 (+18 ms)Flushing ec786c79ff70ab9414a05ecd6c343e10/cf: closing flushed file at 1733400367535Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c738cf5: reopening flushed file at 1733400367545 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ec786c79ff70ab9414a05ecd6c343e10 in 35ms, sequenceid=5, compaction requested=false at 1733400367550 (+5 ms)Writing region close event to WAL at 1733400367557 (+7 ms)Running coprocessor post-close hooks at 1733400367564 (+7 ms)Closed at 1733400367564 2024-12-05T12:06:07,565 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:07,566 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=3389d73927f32bc82a09c7249b8729b4, regionState=CLOSED 2024-12-05T12:06:07,566 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:07,566 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=ec786c79ff70ab9414a05ecd6c343e10, regionState=CLOSED 2024-12-05T12:06:07,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3389d73927f32bc82a09c7249b8729b4, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:07,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:07,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-12-05T12:06:07,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 3389d73927f32bc82a09c7249b8729b4, server=80666a11a09f,42305,1733400131012 in 205 msec 2024-12-05T12:06:07,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=3389d73927f32bc82a09c7249b8729b4, UNASSIGN in 210 msec 2024-12-05T12:06:07,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-12-05T12:06:07,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure ec786c79ff70ab9414a05ecd6c343e10, server=80666a11a09f,42913,1733400130845 in 205 msec 2024-12-05T12:06:07,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-12-05T12:06:07,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ec786c79ff70ab9414a05ecd6c343e10, 
UNASSIGN in 211 msec 2024-12-05T12:06:07,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742198_1374 (size=84) 2024-12-05T12:06:07,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742198_1374 (size=84) 2024-12-05T12:06:07,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742198_1374 (size=84) 2024-12-05T12:06:07,585 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742199_1375 (size=20) 2024-12-05T12:06:07,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742199_1375 (size=20) 2024-12-05T12:06:07,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742199_1375 (size=20) 2024-12-05T12:06:07,592 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742200_1376 (size=21) 2024-12-05T12:06:07,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742200_1376 (size=21) 2024-12-05T12:06:07,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742200_1376 (size=21) 2024-12-05T12:06:07,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742201_1377 (size=84) 2024-12-05T12:06:07,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742201_1377 (size=84) 2024-12-05T12:06:07,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742201_1377 (size=84) 2024-12-05T12:06:07,607 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,616 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-05T12:06:07,618 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366693.3389d73927f32bc82a09c7249b8729b4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:07,618 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733400366693.ec786c79ff70ab9414a05ecd6c343e10.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:07,618 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:07,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, ASSIGN}] 2024-12-05T12:06:07,624 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, ASSIGN 2024-12-05T12:06:07,625 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, ASSIGN; state=MERGED, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:07,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T12:06:07,775 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-05T12:06:07,775 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=68a973778e2ec0a6c9355eecf138a149, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:07,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, ASSIGN because future has completed 2024-12-05T12:06:07,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:07,933 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 
2024-12-05T12:06:07,933 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 68a973778e2ec0a6c9355eecf138a149, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.', STARTKEY => '', ENDKEY => ''} 2024-12-05T12:06:07,934 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. service=AccessControlService 2024-12-05T12:06:07,934 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:07,934 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,934 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:07,934 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,935 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,937 INFO [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,938 INFO [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68a973778e2ec0a6c9355eecf138a149 columnFamilyName cf 2024-12-05T12:06:07,938 DEBUG [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:07,949 DEBUG [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8792523f1d144fdcb7d36595171cd14d.ec786c79ff70ab9414a05ecd6c343e10->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d-top 2024-12-05T12:06:07,953 DEBUG [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8d310a29f11f468ca95f2da88d1c5019.3389d73927f32bc82a09c7249b8729b4->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019-top 2024-12-05T12:06:07,953 INFO [StoreOpener-68a973778e2ec0a6c9355eecf138a149-1 {}] regionserver.HStore(327): Store=68a973778e2ec0a6c9355eecf138a149/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:07,954 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,954 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,955 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,956 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,956 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,958 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,958 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 68a973778e2ec0a6c9355eecf138a149; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59832928, jitterRate=-0.10841989517211914}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:07,958 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:07,959 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 68a973778e2ec0a6c9355eecf138a149: Running coprocessor pre-open hook at 1733400367935Writing region info on filesystem at 1733400367935Initializing all the Stores at 1733400367936 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400367936Cleaning up temporary data from old regions at 1733400367956 (+20 ms)Running coprocessor post-open hooks at 1733400367958 (+2 ms)Region opened successfully at 1733400367959 (+1 ms) 2024-12-05T12:06:07,959 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149., pid=154, masterSystemTime=1733400367929 2024-12-05T12:06:07,960 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.,because compaction is disabled. 2024-12-05T12:06:07,961 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 2024-12-05T12:06:07,961 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 
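The CompactionConfiguration dump and the "Ignoring compaction request ... because compaction is disabled" message above reflect the region server's compaction settings for the newly opened merged region. A rough sketch of where those numbers come from, assuming the usual HBase 2.x configuration keys (the key names are my assumption about the standard settings, not something printed in this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the CompactionConfiguration(183) dump logged above.
    conf.setInt("hbase.hstore.compaction.min", 3);                       // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                      // maxFilesToCompact
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize = 128 MB
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);        // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);           // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);         // major jitter
    // The test cluster apparently runs with compactions switched off, which is why the
    // CompactSplit thread logs "compaction is disabled" right after the region opens.
    conf.setBoolean("hbase.regionserver.compaction.enabled", false);
  }
}
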
2024-12-05T12:06:07,961 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=68a973778e2ec0a6c9355eecf138a149, regionState=OPEN, openSeqNum=9, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:07,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:07,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-05T12:06:07,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012 in 187 msec 2024-12-05T12:06:07,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-05T12:06:07,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, ASSIGN in 343 msec 2024-12-05T12:06:07,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[3389d73927f32bc82a09c7249b8729b4, ec786c79ff70ab9414a05ecd6c343e10], force=true in 618 msec 2024-12-05T12:06:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-05T12:06:07,978 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T12:06:07,979 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400367979 (current time:1733400367979). 
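The snapshot request logged above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 ... type=FLUSH ttl=0 }) is an online, flush-first snapshot. A minimal sketch of how a client issues such a request, assuming the standard HBase 2.x Admin API; only the snapshot and table names are taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // SnapshotType.FLUSH asks each region to flush its memstore before the
      // store files are captured, matching "type=FLUSH" in the request above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}
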
2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42309f5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:07,981 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:07,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:07,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:07,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e1cfc9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:07,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:07,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,982 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:07,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72bb4af6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:07,984 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:07,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:07,985 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:07,987 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,987 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:06:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ca7d94d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:07,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:07,989 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ffa4db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:07,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,990 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:07,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e38bfec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:07,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:07,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:07,993 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
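The "jenkins: RWXCA" ACL entries that PermissionStorage reads back while validating this snapshot were stored when the table permission was written earlier in the log. A hedged sketch of how such a table-level grant is typically made through AccessControlClient; the call below is an illustration and not necessarily how this particular test set up its ACLs:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grants user "jenkins" READ/WRITE/EXEC/CREATE/ADMIN on the whole table
      // (null family/qualifier = table-level), i.e. the "RWXCA" entry seen in the log.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
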
2024-12-05T12:06:07,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:07,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:07,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37830, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:07,997 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:07,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:07,997 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:07,998 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-05T12:06:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:06:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-05T12:06:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-05T12:06:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T12:06:08,000 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:08,001 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:08,003 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:08,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742202_1378 (size=216) 2024-12-05T12:06:08,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742202_1378 (size=216) 2024-12-05T12:06:08,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742202_1378 (size=216) 2024-12-05T12:06:08,010 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:08,010 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68a973778e2ec0a6c9355eecf138a149}] 2024-12-05T12:06:08,011 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:08,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T12:06:08,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 68a973778e2ec0a6c9355eecf138a149: 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8792523f1d144fdcb7d36595171cd14d.ec786c79ff70ab9414a05ecd6c343e10->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d-top, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8d310a29f11f468ca95f2da88d1c5019.3389d73927f32bc82a09c7249b8729b4->hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019-top] hfiles 2024-12-05T12:06:08,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8792523f1d144fdcb7d36595171cd14d.ec786c79ff70ab9414a05ecd6c343e10 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8d310a29f11f468ca95f2da88d1c5019.3389d73927f32bc82a09c7249b8729b4 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742203_1379 (size=269) 2024-12-05T12:06:08,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742203_1379 (size=269) 2024-12-05T12:06:08,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742203_1379 (size=269) 2024-12-05T12:06:08,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 
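[Illustrative aside, not part of the captured output: the SnapshotProcedure (pid=155) and its SnapshotRegionProcedure child above are created on the master in response to a client snapshot request. A minimal sketch of that client-side call, under the assumption that the test uses the standard Admin API; the names are copied from the log:]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // type=FLUSH matches the snapshot description logged above; the master
      // walks SNAPSHOT_PREPARE through SNAPSHOT_COMPLETE_SNAPSHOT for it.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}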
2024-12-05T12:06:08,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-05T12:06:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-05T12:06:08,173 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:08,173 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:08,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-05T12:06:08,175 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:08,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 68a973778e2ec0a6c9355eecf138a149 in 164 msec 2024-12-05T12:06:08,176 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:08,176 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:08,176 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,177 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742204_1380 (size=670) 2024-12-05T12:06:08,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742204_1380 (size=670) 2024-12-05T12:06:08,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742204_1380 (size=670) 2024-12-05T12:06:08,188 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:08,193 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:08,193 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,194 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:08,194 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-05T12:06:08,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 195 msec 2024-12-05T12:06:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-05T12:06:08,318 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T12:06:08,318 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318 2024-12-05T12:06:08,318 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:08,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:08,341 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,342 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:06:08,366 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742205_1381 (size=216) 2024-12-05T12:06:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742205_1381 (size=216) 2024-12-05T12:06:08,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742205_1381 (size=216) 2024-12-05T12:06:08,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742206_1382 (size=670) 2024-12-05T12:06:08,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742206_1382 (size=670) 2024-12-05T12:06:08,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742206_1382 (size=670) 2024-12-05T12:06:08,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:08,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:08,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:08,566 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0006_000001 (auth:SIMPLE) from 127.0.0.1:40226 2024-12-05T12:06:08,577 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000001/launch_container.sh] 2024-12-05T12:06:08,577 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000001/container_tokens] 2024-12-05T12:06:08,577 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0006/container_1733400137807_0006_01_000001/sysfs] 2024-12-05T12:06:08,982 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:06:09,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-9268465343914220538.jar 2024-12-05T12:06:09,337 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,338 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-15015327476406273335.jar 2024-12-05T12:06:09,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:09,418 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:06:09,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:06:09,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:06:09,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:06:09,419 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:06:09,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:06:09,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:06:09,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:06:09,420 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:06:09,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
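[Illustrative aside, not part of the captured output: the long run of "For class ..., using jar ..." lines comes from TableMapReduceUtil resolving the containing jar of each class the MapReduce job needs and adding it to the job's distributed cache. A hedged sketch of that step in isolation; the job name is a placeholder:]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-demo"); // placeholder job name
    // Resolves a jar for each required class and records it for the job;
    // this is the call that emits the "For class ..., using jar ..." entries.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}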
2024-12-05T12:06:09,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:06:09,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:09,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:09,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:09,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:09,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:09,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:09,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742207_1383 (size=131440) 2024-12-05T12:06:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742207_1383 (size=131440) 2024-12-05T12:06:09,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742207_1383 (size=131440) 2024-12-05T12:06:09,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742208_1384 (size=4188619) 2024-12-05T12:06:09,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742208_1384 (size=4188619) 2024-12-05T12:06:09,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742208_1384 (size=4188619) 2024-12-05T12:06:09,572 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742209_1385 (size=1323991) 2024-12-05T12:06:09,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742209_1385 (size=1323991) 2024-12-05T12:06:09,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742209_1385 (size=1323991) 2024-12-05T12:06:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742210_1386 (size=903934) 2024-12-05T12:06:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742210_1386 (size=903934) 2024-12-05T12:06:09,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742210_1386 (size=903934) 2024-12-05T12:06:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742211_1387 (size=8360360) 2024-12-05T12:06:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742211_1387 (size=8360360) 2024-12-05T12:06:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742211_1387 (size=8360360) 2024-12-05T12:06:09,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742212_1388 (size=1877034) 2024-12-05T12:06:09,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742212_1388 (size=1877034) 2024-12-05T12:06:09,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742212_1388 (size=1877034) 2024-12-05T12:06:09,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742213_1389 (size=77835) 2024-12-05T12:06:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742213_1389 (size=77835) 2024-12-05T12:06:09,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742213_1389 (size=77835) 2024-12-05T12:06:09,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742214_1390 (size=30949) 2024-12-05T12:06:09,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742214_1390 (size=30949) 2024-12-05T12:06:09,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742214_1390 (size=30949) 2024-12-05T12:06:09,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742215_1391 (size=1597213) 2024-12-05T12:06:09,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742215_1391 (size=1597213) 2024-12-05T12:06:09,809 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742215_1391 (size=1597213) 2024-12-05T12:06:09,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742216_1392 (size=4695811) 2024-12-05T12:06:09,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742216_1392 (size=4695811) 2024-12-05T12:06:09,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742216_1392 (size=4695811) 2024-12-05T12:06:09,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742217_1393 (size=232957) 2024-12-05T12:06:09,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742217_1393 (size=232957) 2024-12-05T12:06:09,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742217_1393 (size=232957) 2024-12-05T12:06:09,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742218_1394 (size=127628) 2024-12-05T12:06:09,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742218_1394 (size=127628) 2024-12-05T12:06:09,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742218_1394 (size=127628) 2024-12-05T12:06:09,913 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:09,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742219_1395 (size=20406) 2024-12-05T12:06:09,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742219_1395 (size=20406) 2024-12-05T12:06:09,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742219_1395 (size=20406) 2024-12-05T12:06:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742220_1396 (size=5175431) 2024-12-05T12:06:10,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742220_1396 (size=5175431) 2024-12-05T12:06:10,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742220_1396 (size=5175431) 2024-12-05T12:06:10,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742221_1397 (size=217634) 2024-12-05T12:06:10,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742221_1397 (size=217634) 2024-12-05T12:06:10,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742221_1397 (size=217634) 2024-12-05T12:06:10,129 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742222_1398 (size=6425023) 2024-12-05T12:06:10,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742222_1398 (size=6425023) 2024-12-05T12:06:10,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742222_1398 (size=6425023) 2024-12-05T12:06:10,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742223_1399 (size=1832290) 2024-12-05T12:06:10,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742223_1399 (size=1832290) 2024-12-05T12:06:10,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742223_1399 (size=1832290) 2024-12-05T12:06:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742224_1400 (size=322274) 2024-12-05T12:06:10,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742224_1400 (size=322274) 2024-12-05T12:06:10,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742224_1400 (size=322274) 2024-12-05T12:06:10,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742225_1401 (size=503880) 2024-12-05T12:06:10,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742225_1401 (size=503880) 2024-12-05T12:06:10,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742225_1401 (size=503880) 2024-12-05T12:06:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742226_1402 (size=443172) 2024-12-05T12:06:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742226_1402 (size=443172) 2024-12-05T12:06:10,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742226_1402 (size=443172) 2024-12-05T12:06:10,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742227_1403 (size=29229) 2024-12-05T12:06:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742227_1403 (size=29229) 2024-12-05T12:06:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742227_1403 (size=29229) 2024-12-05T12:06:10,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:10,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-05T12:06:10,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:10,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-05T12:06:10,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-05T12:06:10,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742228_1404 (size=24096) 2024-12-05T12:06:10,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742228_1404 (size=24096) 2024-12-05T12:06:10,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742228_1404 (size=24096) 2024-12-05T12:06:10,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742229_1405 (size=111872) 2024-12-05T12:06:10,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742229_1405 (size=111872) 2024-12-05T12:06:10,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742229_1405 (size=111872) 2024-12-05T12:06:10,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742230_1406 (size=45609) 2024-12-05T12:06:10,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742230_1406 (size=45609) 2024-12-05T12:06:10,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742230_1406 (size=45609) 2024-12-05T12:06:10,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742231_1407 (size=136454) 2024-12-05T12:06:10,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742231_1407 (size=136454) 2024-12-05T12:06:10,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742231_1407 (size=136454) 2024-12-05T12:06:10,548 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
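[Illustrative aside, not part of the captured output: the export job whose splits and block writes appear around here is driven by the ExportSnapshot tool. A minimal sketch of invoking it programmatically, assuming the documented -snapshot and -copy-to options; the snapshot name and destination URI are copied from the log for illustration only:]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot is a Hadoop Tool: it loads the snapshot's hfile list,
    // computes export splits, and copies manifest plus hfiles to the target.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318"
    });
    System.exit(rc);
  }
}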
2024-12-05T12:06:10,551 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-05T12:06:10,553 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-05T12:06:10,553 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-05T12:06:10,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742232_1408 (size=481) 2024-12-05T12:06:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742232_1408 (size=481) 2024-12-05T12:06:10,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742232_1408 (size=481) 2024-12-05T12:06:10,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742233_1409 (size=21) 2024-12-05T12:06:10,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742233_1409 (size=21) 2024-12-05T12:06:10,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742233_1409 (size=21) 2024-12-05T12:06:10,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742234_1410 (size=304055) 2024-12-05T12:06:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742234_1410 (size=304055) 2024-12-05T12:06:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742234_1410 (size=304055) 2024-12-05T12:06:10,690 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:06:10,690 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:06:11,461 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:44218 2024-12-05T12:06:12,833 WARN [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 1, running: 1 2024-12-05T12:06:13,809 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cac532a447333068e6a2db6ad5aa6d3d changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:06:13,809 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 52564b4c12a2de511877a196719da4fe changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:06:15,864 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:17,034 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:56428 2024-12-05T12:06:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742235_1411 (size=349753) 2024-12-05T12:06:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742235_1411 (size=349753) 2024-12-05T12:06:17,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742235_1411 (size=349753) 2024-12-05T12:06:19,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:59906 2024-12-05T12:06:19,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:60656 2024-12-05T12:06:22,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742236_1412 (size=4945) 2024-12-05T12:06:22,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742236_1412 (size=4945) 2024-12-05T12:06:22,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742236_1412 (size=4945) 2024-12-05T12:06:22,741 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000002/launch_container.sh] 2024-12-05T12:06:22,741 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000002/container_tokens] 2024-12-05T12:06:22,741 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000002/sysfs] 2024-12-05T12:06:23,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742238_1414 (size=4945) 2024-12-05T12:06:23,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742238_1414 (size=4945) 2024-12-05T12:06:23,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742238_1414 (size=4945) 2024-12-05T12:06:23,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742237_1413 (size=22243) 2024-12-05T12:06:23,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742237_1413 (size=22243) 2024-12-05T12:06:23,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742237_1413 (size=22243) 2024-12-05T12:06:23,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742239_1415 (size=482) 2024-12-05T12:06:23,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742239_1415 (size=482) 2024-12-05T12:06:23,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742239_1415 (size=482) 2024-12-05T12:06:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742240_1416 (size=22243) 2024-12-05T12:06:23,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742240_1416 (size=22243) 2024-12-05T12:06:23,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742240_1416 (size=22243) 2024-12-05T12:06:23,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000003/launch_container.sh] 2024-12-05T12:06:23,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000003/container_tokens] 2024-12-05T12:06:23,258 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000003/sysfs] 2024-12-05T12:06:23,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742241_1417 (size=349753) 2024-12-05T12:06:23,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742241_1417 (size=349753) 2024-12-05T12:06:23,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742241_1417 (size=349753) 2024-12-05T12:06:23,278 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:60670 2024-12-05T12:06:24,827 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:06:24,828 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:06:24,833 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:24,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:06:24,834 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:06:24,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:24,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-05T12:06:24,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-05T12:06:24,834 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:24,835 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-05T12:06:24,835 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400368318/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-05T12:06:24,840 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:24,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-05T12:06:24,844 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400384844"}]},"ts":"1733400384844"} 2024-12-05T12:06:24,846 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-05T12:06:24,846 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-05T12:06:24,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-05T12:06:24,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, UNASSIGN}] 2024-12-05T12:06:24,849 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, UNASSIGN 2024-12-05T12:06:24,850 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=68a973778e2ec0a6c9355eecf138a149, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:24,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, UNASSIGN because future has completed 2024-12-05T12:06:24,851 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:24,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=157 2024-12-05T12:06:25,005 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:25,005 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:25,005 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 68a973778e2ec0a6c9355eecf138a149, disabling compactions & flushes 2024-12-05T12:06:25,005 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 2024-12-05T12:06:25,006 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 2024-12-05T12:06:25,006 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. after waiting 0 ms 2024-12-05T12:06:25,006 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 2024-12-05T12:06:25,017 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-05T12:06:25,018 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:25,018 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149. 
2024-12-05T12:06:25,018 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 68a973778e2ec0a6c9355eecf138a149: Waiting for close lock at 1733400385005Running coprocessor pre-close hooks at 1733400385005Disabling compacts and flushes for region at 1733400385005Disabling writes for close at 1733400385006 (+1 ms)Writing region close event to WAL at 1733400385008 (+2 ms)Running coprocessor post-close hooks at 1733400385017 (+9 ms)Closed at 1733400385018 (+1 ms) 2024-12-05T12:06:25,019 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:25,020 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=68a973778e2ec0a6c9355eecf138a149, regionState=CLOSED 2024-12-05T12:06:25,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:25,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-05T12:06:25,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 68a973778e2ec0a6c9355eecf138a149, server=80666a11a09f,42305,1733400131012 in 171 msec 2024-12-05T12:06:25,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-05T12:06:25,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=68a973778e2ec0a6c9355eecf138a149, UNASSIGN in 176 msec 2024-12-05T12:06:25,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-05T12:06:25,028 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 180 msec 2024-12-05T12:06:25,029 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400385029"}]},"ts":"1733400385029"} 2024-12-05T12:06:25,031 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-05T12:06:25,031 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-05T12:06:25,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 191 msec 2024-12-05T12:06:25,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-05T12:06:25,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table 
Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T12:06:25,158 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,160 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,161 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,163 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:25,163 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:25,163 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,163 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:25,164 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/recovered.edits] 2024-12-05T12:06:25,165 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/recovered.edits] 2024-12-05T12:06:25,165 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/recovered.edits] 2024-12-05T12:06:25,168 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/cf/8d310a29f11f468ca95f2da88d1c5019 2024-12-05T12:06:25,168 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/cf/8792523f1d144fdcb7d36595171cd14d 2024-12-05T12:06:25,168 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8792523f1d144fdcb7d36595171cd14d.ec786c79ff70ab9414a05ecd6c343e10 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8792523f1d144fdcb7d36595171cd14d.ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:25,170 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8d310a29f11f468ca95f2da88d1c5019.3389d73927f32bc82a09c7249b8729b4 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/cf/8d310a29f11f468ca95f2da88d1c5019.3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:25,171 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/recovered.edits/8.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10/recovered.edits/8.seqid 2024-12-05T12:06:25,171 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/recovered.edits/8.seqid to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4/recovered.edits/8.seqid 2024-12-05T12:06:25,172 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ec786c79ff70ab9414a05ecd6c343e10 2024-12-05T12:06:25,172 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/3389d73927f32bc82a09c7249b8729b4 2024-12-05T12:06:25,173 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/recovered.edits/12.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149/recovered.edits/12.seqid 2024-12-05T12:06:25,173 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/68a973778e2ec0a6c9355eecf138a149 2024-12-05T12:06:25,173 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-05T12:06:25,175 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,178 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-05T12:06:25,180 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-05T12:06:25,181 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,181 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-12-05T12:06:25,181 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400385181"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:25,183 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-05T12:06:25,183 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 68a973778e2ec0a6c9355eecf138a149, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149.', STARTKEY => '', ENDKEY => ''}] 2024-12-05T12:06:25,183 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-05T12:06:25,184 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400385183"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:25,185 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-05T12:06:25,186 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 28 msec 2024-12-05T12:06:25,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,236 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T12:06:25,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T12:06:25,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 
2024-12-05T12:06:25,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,244 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,244 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:25,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:25,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:25,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=161 2024-12-05T12:06:25,245 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:25,245 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,245 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-05T12:06:25,246 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T12:06:25,248 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400385248"}]},"ts":"1733400385248"} 2024-12-05T12:06:25,250 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-05T12:06:25,250 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-05T12:06:25,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-05T12:06:25,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, UNASSIGN}] 2024-12-05T12:06:25,252 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, UNASSIGN 2024-12-05T12:06:25,252 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, UNASSIGN 2024-12-05T12:06:25,253 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=cac532a447333068e6a2db6ad5aa6d3d, 
regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:25,253 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=52564b4c12a2de511877a196719da4fe, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:25,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, UNASSIGN because future has completed 2024-12-05T12:06:25,254 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:25,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:25,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, UNASSIGN because future has completed 2024-12-05T12:06:25,255 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:25,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T12:06:25,407 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:25,407 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:25,407 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing cac532a447333068e6a2db6ad5aa6d3d, disabling compactions & flushes 2024-12-05T12:06:25,407 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:25,407 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:25,407 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
after waiting 0 ms 2024-12-05T12:06:25,407 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 2024-12-05T12:06:25,408 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:25,408 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:25,408 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 52564b4c12a2de511877a196719da4fe, disabling compactions & flushes 2024-12-05T12:06:25,408 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:25,408 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:25,408 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. after waiting 0 ms 2024-12-05T12:06:25,408 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:25,410 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:25,411 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:25,411 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:25,411 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d. 
2024-12-05T12:06:25,411 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for cac532a447333068e6a2db6ad5aa6d3d: Waiting for close lock at 1733400385407Running coprocessor pre-close hooks at 1733400385407Disabling compacts and flushes for region at 1733400385407Disabling writes for close at 1733400385407Writing region close event to WAL at 1733400385408 (+1 ms)Running coprocessor post-close hooks at 1733400385411 (+3 ms)Closed at 1733400385411 2024-12-05T12:06:25,411 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:25,411 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe. 2024-12-05T12:06:25,411 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 52564b4c12a2de511877a196719da4fe: Waiting for close lock at 1733400385408Running coprocessor pre-close hooks at 1733400385408Disabling compacts and flushes for region at 1733400385408Disabling writes for close at 1733400385408Writing region close event to WAL at 1733400385408Running coprocessor post-close hooks at 1733400385411 (+3 ms)Closed at 1733400385411 2024-12-05T12:06:25,413 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:25,413 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=cac532a447333068e6a2db6ad5aa6d3d, regionState=CLOSED 2024-12-05T12:06:25,413 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:25,414 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=52564b4c12a2de511877a196719da4fe, regionState=CLOSED 2024-12-05T12:06:25,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:25,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:25,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-05T12:06:25,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure cac532a447333068e6a2db6ad5aa6d3d, server=80666a11a09f,42305,1733400131012 in 161 msec 2024-12-05T12:06:25,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-05T12:06:25,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 52564b4c12a2de511877a196719da4fe, server=80666a11a09f,42913,1733400130845 in 161 msec 2024-12-05T12:06:25,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cac532a447333068e6a2db6ad5aa6d3d, UNASSIGN in 165 msec 2024-12-05T12:06:25,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-05T12:06:25,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=52564b4c12a2de511877a196719da4fe, UNASSIGN in 166 msec 2024-12-05T12:06:25,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-05T12:06:25,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 169 msec 2024-12-05T12:06:25,421 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400385421"}]},"ts":"1733400385421"} 2024-12-05T12:06:25,423 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-05T12:06:25,423 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-05T12:06:25,424 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 177 msec 2024-12-05T12:06:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-05T12:06:25,568 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T12:06:25,569 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,571 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,572 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,574 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,576 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:25,576 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:25,578 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/recovered.edits] 2024-12-05T12:06:25,578 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/recovered.edits] 2024-12-05T12:06:25,581 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/cf/a3ca7ef33d46498e9c8e771f307e3fe5 2024-12-05T12:06:25,581 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/cf/17a00bfbf5b7419f9fc36d0ed7657846 2024-12-05T12:06:25,583 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe/recovered.edits/9.seqid 2024-12-05T12:06:25,583 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d/recovered.edits/9.seqid 2024-12-05T12:06:25,583 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:25,583 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithMergeRegion/cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:25,583 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-05T12:06:25,583 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-05T12:06:25,584 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-05T12:06:25,586 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412051c5e65207e454f83b70d1ed136604c5c_52564b4c12a2de511877a196719da4fe 2024-12-05T12:06:25,587 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241205fe4ca56a1d04466ba2d67ff34b9ba85b_cac532a447333068e6a2db6ad5aa6d3d 2024-12-05T12:06:25,588 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-05T12:06:25,589 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 
2024-12-05T12:06:25,591 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-05T12:06:25,593 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-05T12:06:25,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,594 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T12:06:25,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T12:06:25,595 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T12:06:25,595 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-05T12:06:25,595 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
2024-12-05T12:06:25,595 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400385595"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:25,595 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400385595"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:25,597 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:06:25,597 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cac532a447333068e6a2db6ad5aa6d3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733400364686.cac532a447333068e6a2db6ad5aa6d3d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 52564b4c12a2de511877a196719da4fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733400364686.52564b4c12a2de511877a196719da4fe.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:06:25,597 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-05T12:06:25,597 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400385597"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:25,599 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-05T12:06:25,599 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 30 msec 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,602 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,602 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-05T12:06:25,604 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,604 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-05T12:06:25,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-05T12:06:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,614 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-05T12:06:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:25,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-05T12:06:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:25,638 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=819 (was 809) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-945301731_1 at /127.0.0.1:46826 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:46870 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:39835 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40729 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:39000 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6410 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-945301731_1 at /127.0.0.1:38978 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:40729 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:38770 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 149509) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=825 (was 803) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=735 (was 872), ProcessCount=14 (was 17), AvailableMemoryMB=2822 (was 2747) - AvailableMemoryMB LEAK? - 2024-12-05T12:06:25,638 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-05T12:06:25,655 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=819, OpenFileDescriptor=825, MaxFileDescriptor=1048576, SystemLoadAverage=735, ProcessCount=14, AvailableMemoryMB=2821 2024-12-05T12:06:25,655 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-05T12:06:25,657 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:25,658 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:25,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-05T12:06:25,659 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T12:06:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742242_1418 (size=443) 2024-12-05T12:06:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39603 is added to blk_1073742242_1418 (size=443) 2024-12-05T12:06:25,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742242_1418 (size=443) 2024-12-05T12:06:25,666 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 32910da2d48395a4bc0ab1f392e25ded, NAME => 'testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:25,667 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ef081bb0e17d22aa06240dd7c58dc8f6, NAME => 'testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742244_1420 (size=68) 2024-12-05T12:06:25,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742244_1420 (size=68) 2024-12-05T12:06:25,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742243_1419 (size=68) 2024-12-05T12:06:25,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742243_1419 (size=68) 2024-12-05T12:06:25,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742243_1419 (size=68) 2024-12-05T12:06:25,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742244_1420 (size=68) 2024-12-05T12:06:25,677 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:25,677 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] 
regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 32910da2d48395a4bc0ab1f392e25ded, disabling compactions & flushes 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing ef081bb0e17d22aa06240dd7c58dc8f6, disabling compactions & flushes 2024-12-05T12:06:25,678 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:25,678 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. after waiting 0 ms 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. after waiting 0 ms 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:25,678 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:25,678 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 
2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 32910da2d48395a4bc0ab1f392e25ded: Waiting for close lock at 1733400385677Disabling compacts and flushes for region at 1733400385677Disabling writes for close at 1733400385678 (+1 ms)Writing region close event to WAL at 1733400385678Closed at 1733400385678 2024-12-05T12:06:25,678 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for ef081bb0e17d22aa06240dd7c58dc8f6: Waiting for close lock at 1733400385677Disabling compacts and flushes for region at 1733400385677Disabling writes for close at 1733400385678 (+1 ms)Writing region close event to WAL at 1733400385678Closed at 1733400385678 2024-12-05T12:06:25,679 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:25,679 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733400385679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400385679"}]},"ts":"1733400385679"} 2024-12-05T12:06:25,679 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733400385679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400385679"}]},"ts":"1733400385679"} 2024-12-05T12:06:25,681 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-05T12:06:25,681 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:25,681 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400385681"}]},"ts":"1733400385681"} 2024-12-05T12:06:25,683 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-05T12:06:25,683 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:25,684 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:25,684 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:25,684 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:25,684 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:25,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, ASSIGN}] 2024-12-05T12:06:25,685 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, ASSIGN 2024-12-05T12:06:25,685 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, ASSIGN 2024-12-05T12:06:25,686 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:25,686 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:25,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T12:06:25,836 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:06:25,837 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=ef081bb0e17d22aa06240dd7c58dc8f6, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:25,837 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=32910da2d48395a4bc0ab1f392e25ded, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:25,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, ASSIGN because future has completed 2024-12-05T12:06:25,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:25,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, ASSIGN because future has completed 2024-12-05T12:06:25,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:25,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T12:06:25,993 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:25,994 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 
2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 32910da2d48395a4bc0ab1f392e25ded, NAME => 'testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => ef081bb0e17d22aa06240dd7c58dc8f6, NAME => 'testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. service=AccessControlService 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. service=AccessControlService 2024-12-05T12:06:25,994 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:25,994 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:25,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:25,995 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:25,995 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,995 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] 
regionserver.HRegion(7797): checking classloading for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:25,995 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,996 INFO [StoreOpener-32910da2d48395a4bc0ab1f392e25ded-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,996 INFO [StoreOpener-ef081bb0e17d22aa06240dd7c58dc8f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:25,997 INFO [StoreOpener-ef081bb0e17d22aa06240dd7c58dc8f6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef081bb0e17d22aa06240dd7c58dc8f6 columnFamilyName cf 2024-12-05T12:06:25,997 INFO [StoreOpener-32910da2d48395a4bc0ab1f392e25ded-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32910da2d48395a4bc0ab1f392e25ded columnFamilyName cf 2024-12-05T12:06:25,998 DEBUG [StoreOpener-ef081bb0e17d22aa06240dd7c58dc8f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:25,998 DEBUG [StoreOpener-32910da2d48395a4bc0ab1f392e25ded-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:25,998 INFO [StoreOpener-ef081bb0e17d22aa06240dd7c58dc8f6-1 {}] regionserver.HStore(327): Store=ef081bb0e17d22aa06240dd7c58dc8f6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:25,998 INFO [StoreOpener-32910da2d48395a4bc0ab1f392e25ded-1 {}] regionserver.HStore(327): Store=32910da2d48395a4bc0ab1f392e25ded/cf, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:25,999 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,999 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:25,999 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:25,999 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,000 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,001 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,001 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:26,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:26,003 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened ef081bb0e17d22aa06240dd7c58dc8f6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64236593, jitterRate=-0.042800173163414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:26,003 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 32910da2d48395a4bc0ab1f392e25ded; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65734162, jitterRate=-0.02048465609550476}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:26,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,004 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for ef081bb0e17d22aa06240dd7c58dc8f6: Running coprocessor pre-open hook at 1733400385995Writing region info on filesystem at 1733400385995Initializing all the Stores at 1733400385996 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400385996Cleaning up temporary data from old regions at 1733400386000 (+4 ms)Running coprocessor post-open hooks at 1733400386003 (+3 ms)Region opened successfully at 1733400386004 (+1 ms) 2024-12-05T12:06:26,004 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 32910da2d48395a4bc0ab1f392e25ded: Running coprocessor pre-open hook at 1733400385995Writing region info on filesystem at 1733400385995Initializing all the Stores at 1733400385996 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400385996Cleaning up temporary data from old regions at 1733400386000 (+4 ms)Running coprocessor post-open hooks at 1733400386003 (+3 ms)Region opened successfully at 1733400386004 (+1 ms) 2024-12-05T12:06:26,004 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6., pid=172, masterSystemTime=1733400385990 2024-12-05T12:06:26,004 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded., pid=173, masterSystemTime=1733400385992 2024-12-05T12:06:26,006 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,006 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=ef081bb0e17d22aa06240dd7c58dc8f6, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:26,006 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:26,006 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:26,007 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=32910da2d48395a4bc0ab1f392e25ded, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:26,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:26,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:26,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-05T12:06:26,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012 in 170 msec 2024-12-05T12:06:26,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-05T12:06:26,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845 in 170 msec 2024-12-05T12:06:26,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, ASSIGN in 326 msec 2024-12-05T12:06:26,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-12-05T12:06:26,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, ASSIGN in 327 msec 2024-12-05T12:06:26,013 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:26,013 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400386013"}]},"ts":"1733400386013"} 2024-12-05T12:06:26,015 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-05T12:06:26,016 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:26,016 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-05T12:06:26,019 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T12:06:26,061 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:26,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:26,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:26,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:26,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:26,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:26,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:26,069 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:26,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 412 msec 2024-12-05T12:06:26,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-05T12:06:26,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T12:06:26,288 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-05T12:06:26,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:26,291 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:26,293 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,299 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,305 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,309 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T12:06:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400386309 (current time:1733400386309). 
2024-12-05T12:06:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T12:06:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c976971, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:26,311 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:26,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:26,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:26,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1222a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:26,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:26,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,312 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35916, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:26,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ba4a9df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:26,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:26,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:26,316 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,316 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:06:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15452b9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:26,318 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43c857b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:26,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,319 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:26,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f626f28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:26,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:26,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,322 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:06:26,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:26,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,324 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:26,325 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:26,325 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,325 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T12:06:26,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:06:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T12:06:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-05T12:06:26,327 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:26,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T12:06:26,328 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:26,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:26,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742245_1421 (size=170) 2024-12-05T12:06:26,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742245_1421 (size=170) 2024-12-05T12:06:26,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742245_1421 (size=170) 2024-12-05T12:06:26,336 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-12-05T12:06:26,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6}] 2024-12-05T12:06:26,337 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,337 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T12:06:26,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-05T12:06:26,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for ef081bb0e17d22aa06240dd7c58dc8f6: 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 32910da2d48395a4bc0ab1f392e25ded: 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:26,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:26,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742247_1423 (size=71) 2024-12-05T12:06:26,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742247_1423 (size=71) 2024-12-05T12:06:26,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742246_1422 (size=71) 2024-12-05T12:06:26,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742247_1423 (size=71) 2024-12-05T12:06:26,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742246_1422 (size=71) 2024-12-05T12:06:26,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742246_1422 (size=71) 2024-12-05T12:06:26,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 
2024-12-05T12:06:26,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-05T12:06:26,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-05T12:06:26,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-05T12:06:26,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-05T12:06:26,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,500 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,500 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,502 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 in 165 msec 2024-12-05T12:06:26,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-12-05T12:06:26,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded in 165 msec 2024-12-05T12:06:26,502 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:26,504 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:26,504 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:26,504 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:26,504 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:26,505 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:06:26,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742248_1424 (size=63) 2024-12-05T12:06:26,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742248_1424 (size=63) 2024-12-05T12:06:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742248_1424 (size=63) 2024-12-05T12:06:26,511 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:26,511 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,511 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742249_1425 (size=653) 2024-12-05T12:06:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742249_1425 (size=653) 2024-12-05T12:06:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742249_1425 (size=653) 2024-12-05T12:06:26,523 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:26,526 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:26,527 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,529 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:26,529 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-05T12:06:26,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 203 msec 2024-12-05T12:06:26,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-05T12:06:26,648 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T12:06:26,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:26,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:26,659 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,661 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-05T12:06:26,661 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 
2024-12-05T12:06:26,661 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:26,663 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,667 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,672 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:26,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T12:06:26,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400386674 (current time:1733400386674). 2024-12-05T12:06:26,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:26,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T12:06:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0f9b2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:26,676 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c491d63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:26,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,677 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:26,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59e1ac5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:26,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:26,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,679 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50036, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:26,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,680 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cc6eb40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:26,682 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bd8f77c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:26,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,683 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35984, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:26,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@564540e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:26,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:26,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:26,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:26,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:26,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:26,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:26,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:26,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:26,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:26,689 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:26,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T12:06:26,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T12:06:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-05T12:06:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-05T12:06:26,691 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T12:06:26,692 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:26,694 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:26,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742250_1426 (size=165) 2024-12-05T12:06:26,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742250_1426 (size=165) 2024-12-05T12:06:26,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742250_1426 (size=165) 2024-12-05T12:06:26,700 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:26,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6}] 2024-12-05T12:06:26,701 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,701 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T12:06:26,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-05T12:06:26,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-05T12:06:26,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:26,853 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing ef081bb0e17d22aa06240dd7c58dc8f6 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T12:06:26,853 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 32910da2d48395a4bc0ab1f392e25ded 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T12:06:26,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 is 71, key is 1221008e0d52bab1249e631928868b5f/cf:q/1733400386658/Put/seqid=0 2024-12-05T12:06:26,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742251_1427 (size=8241) 2024-12-05T12:06:26,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742251_1427 (size=8241) 2024-12-05T12:06:26,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded is 71, key is 0e04d7fad22bedb5c3820a559aa5fcc1/cf:q/1733400386656/Put/seqid=0 2024-12-05T12:06:26,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742251_1427 (size=8241) 2024-12-05T12:06:26,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:26,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to 
blk_1073742252_1428 (size=5032) 2024-12-05T12:06:26,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742252_1428 (size=5032) 2024-12-05T12:06:26,879 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742252_1428 (size=5032) 2024-12-05T12:06:26,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:26,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/.tmp/cf/51c879091292488584c9b95e0ab2ff83, store: [table=testtb-testExportExpiredSnapshot family=cf region=ef081bb0e17d22aa06240dd7c58dc8f6] 2024-12-05T12:06:26,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/.tmp/cf/51c879091292488584c9b95e0ab2ff83 is 209, key is 15aa24d748ea270c5df237597220a1253/cf:q/1733400386658/Put/seqid=0 2024-12-05T12:06:26,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/.tmp/cf/5597b8b25d824f918f0ead5cd13aabcc, store: [table=testtb-testExportExpiredSnapshot family=cf region=32910da2d48395a4bc0ab1f392e25ded] 2024-12-05T12:06:26,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/.tmp/cf/5597b8b25d824f918f0ead5cd13aabcc is 209, key is 09ddb6df1841e4bfa242dbf5228962223/cf:q/1733400386656/Put/seqid=0 2024-12-05T12:06:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742253_1429 (size=15202) 2024-12-05T12:06:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742253_1429 (size=15202) 2024-12-05T12:06:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742254_1430 (size=5709) 2024-12-05T12:06:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742254_1430 (size=5709) 2024-12-05T12:06:26,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742254_1430 (size=5709) 2024-12-05T12:06:26,890 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/.tmp/cf/5597b8b25d824f918f0ead5cd13aabcc 2024-12-05T12:06:26,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742253_1429 (size=15202) 2024-12-05T12:06:26,891 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/.tmp/cf/51c879091292488584c9b95e0ab2ff83 2024-12-05T12:06:26,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/.tmp/cf/5597b8b25d824f918f0ead5cd13aabcc as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc 2024-12-05T12:06:26,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/.tmp/cf/51c879091292488584c9b95e0ab2ff83 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83 2024-12-05T12:06:26,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83, entries=48, sequenceid=6, filesize=14.8 K 2024-12-05T12:06:26,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc, entries=2, sequenceid=6, filesize=5.6 K 2024-12-05T12:06:26,900 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 32910da2d48395a4bc0ab1f392e25ded in 48ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:26,900 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ef081bb0e17d22aa06240dd7c58dc8f6 in 48ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for ef081bb0e17d22aa06240dd7c58dc8f6: 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 32910da2d48395a4bc0ab1f392e25ded: 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. for snaptb0-testExportExpiredSnapshot completed. 2024-12-05T12:06:26,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. for snaptb0-testExportExpiredSnapshot completed. 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83] hfiles 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc] hfiles 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,901 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742256_1432 (size=110) 2024-12-05T12:06:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742256_1432 (size=110) 2024-12-05T12:06:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742256_1432 (size=110) 2024-12-05T12:06:26,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742255_1431 (size=110) 2024-12-05T12:06:26,907 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 
2024-12-05T12:06:26,907 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-05T12:06:26,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742255_1431 (size=110) 2024-12-05T12:06:26,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742255_1431 (size=110) 2024-12-05T12:06:26,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-05T12:06:26,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:26,908 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-05T12:06:26,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-05T12:06:26,909 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,909 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 32910da2d48395a4bc0ab1f392e25ded in 209 msec 2024-12-05T12:06:26,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-05T12:06:26,911 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:26,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6 in 210 msec 2024-12-05T12:06:26,912 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:26,913 DEBUG 
[MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-05T12:06:26,913 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:26,913 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:26,914 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded] hfiles 2024-12-05T12:06:26,914 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:26,914 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:26,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742257_1433 (size=294) 2024-12-05T12:06:26,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742257_1433 (size=294) 2024-12-05T12:06:26,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742257_1433 (size=294) 2024-12-05T12:06:26,921 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:26,921 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,922 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742258_1434 (size=963) 2024-12-05T12:06:26,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35565 is added to blk_1073742258_1434 (size=963) 2024-12-05T12:06:26,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742258_1434 (size=963) 2024-12-05T12:06:26,940 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:26,946 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:26,946 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:26,947 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:26,948 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-05T12:06:26,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 258 msec 2024-12-05T12:06:27,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-05T12:06:27,008 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T12:06:27,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:27,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-05T12:06:27,011 
INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:27,011 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-05T12:06:27,012 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T12:06:27,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742259_1435 (size=436) 2024-12-05T12:06:27,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742259_1435 (size=436) 2024-12-05T12:06:27,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742259_1435 (size=436) 2024-12-05T12:06:27,019 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d819fc7dca4529788a4ca3a58e3fe9e5, NAME => 'testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:27,019 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e2d55a422a8d6cd55d6d7cbfae79e377, NAME => 'testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:27,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742261_1437 (size=61) 2024-12-05T12:06:27,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742261_1437 (size=61) 
2024-12-05T12:06:27,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742261_1437 (size=61) 2024-12-05T12:06:27,029 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:27,029 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing e2d55a422a8d6cd55d6d7cbfae79e377, disabling compactions & flushes 2024-12-05T12:06:27,029 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,029 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,029 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. after waiting 0 ms 2024-12-05T12:06:27,029 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,030 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,030 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for e2d55a422a8d6cd55d6d7cbfae79e377: Waiting for close lock at 1733400387029Disabling compacts and flushes for region at 1733400387029Disabling writes for close at 1733400387029Writing region close event to WAL at 1733400387029Closed at 1733400387029 2024-12-05T12:06:27,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742260_1436 (size=61) 2024-12-05T12:06:27,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742260_1436 (size=61) 2024-12-05T12:06:27,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742260_1436 (size=61) 2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing d819fc7dca4529788a4ca3a58e3fe9e5, disabling compactions & flushes 2024-12-05T12:06:27,037 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. after waiting 0 ms 2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,037 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,037 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for d819fc7dca4529788a4ca3a58e3fe9e5: Waiting for close lock at 1733400387037Disabling compacts and flushes for region at 1733400387037Disabling writes for close at 1733400387037Writing region close event to WAL at 1733400387037Closed at 1733400387037 2024-12-05T12:06:27,038 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:27,038 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733400387038"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400387038"}]},"ts":"1733400387038"} 2024-12-05T12:06:27,038 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733400387038"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400387038"}]},"ts":"1733400387038"} 2024-12-05T12:06:27,040 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-05T12:06:27,041 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:27,041 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400387041"}]},"ts":"1733400387041"} 2024-12-05T12:06:27,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-05T12:06:27,043 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:27,044 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:27,044 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:27,044 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:27,044 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:27,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e2d55a422a8d6cd55d6d7cbfae79e377, ASSIGN}] 2024-12-05T12:06:27,045 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e2d55a422a8d6cd55d6d7cbfae79e377, ASSIGN 2024-12-05T12:06:27,046 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5, ASSIGN 2024-12-05T12:06:27,046 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e2d55a422a8d6cd55d6d7cbfae79e377, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:27,046 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T12:06:27,197 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:06:27,197 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d819fc7dca4529788a4ca3a58e3fe9e5, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:27,197 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=e2d55a422a8d6cd55d6d7cbfae79e377, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:27,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e2d55a422a8d6cd55d6d7cbfae79e377, ASSIGN because future has completed 2024-12-05T12:06:27,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:27,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5, ASSIGN because future has completed 2024-12-05T12:06:27,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T12:06:27,353 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,353 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => e2d55a422a8d6cd55d6d7cbfae79e377, NAME => 'testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:06:27,353 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
2024-12-05T12:06:27,353 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => d819fc7dca4529788a4ca3a58e3fe9e5, NAME => 'testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:06:27,353 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. service=AccessControlService 2024-12-05T12:06:27,353 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. service=AccessControlService 2024-12-05T12:06:27,353 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:27,354 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,354 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,355 INFO [StoreOpener-e2d55a422a8d6cd55d6d7cbfae79e377-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,355 INFO [StoreOpener-d819fc7dca4529788a4ca3a58e3fe9e5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,356 INFO [StoreOpener-d819fc7dca4529788a4ca3a58e3fe9e5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d819fc7dca4529788a4ca3a58e3fe9e5 columnFamilyName cf 2024-12-05T12:06:27,356 INFO [StoreOpener-e2d55a422a8d6cd55d6d7cbfae79e377-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2d55a422a8d6cd55d6d7cbfae79e377 columnFamilyName cf 2024-12-05T12:06:27,356 DEBUG [StoreOpener-e2d55a422a8d6cd55d6d7cbfae79e377-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:27,356 DEBUG [StoreOpener-d819fc7dca4529788a4ca3a58e3fe9e5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:27,357 INFO [StoreOpener-e2d55a422a8d6cd55d6d7cbfae79e377-1 {}] regionserver.HStore(327): Store=e2d55a422a8d6cd55d6d7cbfae79e377/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:27,357 INFO [StoreOpener-d819fc7dca4529788a4ca3a58e3fe9e5-1 {}] regionserver.HStore(327): Store=d819fc7dca4529788a4ca3a58e3fe9e5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:27,357 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,357 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,357 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,357 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,357 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,358 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,358 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,358 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,358 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,358 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,359 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,359 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,360 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:27,360 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:27,360 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] 
regionserver.HRegion(1114): Opened d819fc7dca4529788a4ca3a58e3fe9e5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71390744, jitterRate=0.0638049840927124}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:27,360 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened e2d55a422a8d6cd55d6d7cbfae79e377; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67370455, jitterRate=0.0038980096578598022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:27,360 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,360 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,361 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for e2d55a422a8d6cd55d6d7cbfae79e377: Running coprocessor pre-open hook at 1733400387354Writing region info on filesystem at 1733400387354Initializing all the Stores at 1733400387354Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400387354Cleaning up temporary data from old regions at 1733400387358 (+4 ms)Running coprocessor post-open hooks at 1733400387360 (+2 ms)Region opened successfully at 1733400387361 (+1 ms) 2024-12-05T12:06:27,361 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for d819fc7dca4529788a4ca3a58e3fe9e5: Running coprocessor pre-open hook at 1733400387354Writing region info on filesystem at 1733400387354Initializing all the Stores at 1733400387354Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400387354Cleaning up temporary data from old regions at 1733400387358 (+4 ms)Running coprocessor post-open hooks at 1733400387360 (+2 ms)Region opened successfully at 1733400387361 (+1 ms) 2024-12-05T12:06:27,361 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5., pid=184, masterSystemTime=1733400387351 2024-12-05T12:06:27,362 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377., pid=183, masterSystemTime=1733400387350 2024-12-05T12:06:27,362 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,363 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,363 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d819fc7dca4529788a4ca3a58e3fe9e5, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:27,363 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,363 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:06:27,363 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=80666a11a09f,42305,1733400131012, table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-05T12:06:27,364 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=e2d55a422a8d6cd55d6d7cbfae79e377, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:27,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:27,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:27,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-05T12:06:27,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5, server=80666a11a09f,42305,1733400131012 in 166 msec 2024-12-05T12:06:27,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-05T12:06:27,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d819fc7dca4529788a4ca3a58e3fe9e5, ASSIGN in 322 msec 2024-12-05T12:06:27,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 
e2d55a422a8d6cd55d6d7cbfae79e377, server=80666a11a09f,42913,1733400130845 in 167 msec 2024-12-05T12:06:27,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-12-05T12:06:27,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e2d55a422a8d6cd55d6d7cbfae79e377, ASSIGN in 323 msec 2024-12-05T12:06:27,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:27,370 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400387369"}]},"ts":"1733400387369"} 2024-12-05T12:06:27,371 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-05T12:06:27,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:27,372 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-05T12:06:27,374 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T12:06:27,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:27,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:27,419 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:27,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data 
PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:27,456 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 445 msec 2024-12-05T12:06:27,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-05T12:06:27,638 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-05T12:06:27,638 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-05T12:06:27,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
2024-12-05T12:06:27,640 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:27,642 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,646 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:27,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:27,658 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,661 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-05T12:06:27,661 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
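[editor's note] The entries at 12:06:27,656 and 12:06:27,658 above record that the test loads its rows into testExportExpiredSnapshot with the WAL disabled, which is why the region server warns that data may be lost on a crash. For reference, the sketch below shows how a client can request that behaviour per mutation through the standard HBase client API; it is a hypothetical illustration (invented row key and value, column family 'cf' taken from the table descriptor above), not the code that produced these log lines.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Ask the region to skip the write-ahead log for this mutation; this is the
      // client-side request that corresponds to the "WAL disabled" warning above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}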
2024-12-05T12:06:27,661 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:27,662 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,667 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-05T12:06:27,672 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-05T12:06:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-05T12:06:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496380a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:27,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:27,674 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@622abdd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:27,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,675 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35992, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:27,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78296ca0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:27,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:27,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:27,677 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:27,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,678 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
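[editor's note] The snapshot request logged at 12:06:27,672 above carries ttl=10 (seconds). The export attempted at 12:06:38,043 further down runs roughly ten seconds after the snapshot procedure completes (12:06:27,956), so ExportSnapshot rejects it with SnapshotTTLExpiredException (see the ERROR near the end of this excerpt). The sketch below restates that expiry arithmetic using timestamps taken from this log; it is a hypothetical helper, not HBase source, and assumes the TTL is expressed in seconds against epoch-millisecond timestamps.

// Hypothetical helper (not HBase source): restates the TTL check that leads to
// the SnapshotTTLExpiredException later in this log.
public class SnapshotTtlSketch {
  static boolean isExpired(long creationTimeMillis, long ttlSeconds, long nowMillis) {
    if (ttlSeconds <= 0) {
      return false; // treat a non-positive TTL as "never expires" in this sketch
    }
    return creationTimeMillis + ttlSeconds * 1000L < nowMillis;
  }

  public static void main(String[] args) {
    long created = 1733400387956L;       // snapshot procedure finished at 12:06:27,956 (from the log)
    long ttl = 10L;                      // ttl=10 from the snapshot request above
    long exportAttempt = 1733400398043L; // ExportSnapshot verification at 12:06:38,043 (from the log)
    System.out.println(isExpired(created, ttl, exportAttempt)); // prints true: the 10s TTL has elapsed
  }
}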
2024-12-05T12:06:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1383f8c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:27,680 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f514bcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:27,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,681 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:27,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@623b551, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:27,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:27,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:27,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:06:27,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:27,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:27,685 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45396, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:27,686 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:27,686 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:27,686 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-05T12:06:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:06:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-05T12:06:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-05T12:06:27,689 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T12:06:27,690 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:27,692 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742262_1438 (size=152) 2024-12-05T12:06:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742262_1438 (size=152) 2024-12-05T12:06:27,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742262_1438 (size=152) 2024-12-05T12:06:27,715 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:27,716 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377}] 2024-12-05T12:06:27,716 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,716 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T12:06:27,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-05T12:06:27,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-05T12:06:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:06:27,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 
2024-12-05T12:06:27,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing d819fc7dca4529788a4ca3a58e3fe9e5 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-05T12:06:27,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing e2d55a422a8d6cd55d6d7cbfae79e377 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-05T12:06:27,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205b93b4365d06349368449cb5828807fc7_d819fc7dca4529788a4ca3a58e3fe9e5 is 71, key is 023eccf4b8d37752f9eb0c9e0190d4e0/cf:q/1733400387656/Put/seqid=0 2024-12-05T12:06:27,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120590e2d48c8c13470eab210a9c9a58fd3b_e2d55a422a8d6cd55d6d7cbfae79e377 is 71, key is 12c2710a70cb9a40029efa5e03cf02a8/cf:q/1733400387657/Put/seqid=0 2024-12-05T12:06:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742264_1440 (size=8171) 2024-12-05T12:06:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742264_1440 (size=8171) 2024-12-05T12:06:27,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742264_1440 (size=8171) 2024-12-05T12:06:27,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:27,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742263_1439 (size=5101) 2024-12-05T12:06:27,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742263_1439 (size=5101) 2024-12-05T12:06:27,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742263_1439 (size=5101) 2024-12-05T12:06:27,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:27,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120590e2d48c8c13470eab210a9c9a58fd3b_e2d55a422a8d6cd55d6d7cbfae79e377 to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120590e2d48c8c13470eab210a9c9a58fd3b_e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/.tmp/cf/c243d9ad8e1249449d9436230db0cc19, store: [table=testExportExpiredSnapshot family=cf region=e2d55a422a8d6cd55d6d7cbfae79e377] 2024-12-05T12:06:27,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/.tmp/cf/c243d9ad8e1249449d9436230db0cc19 is 202, key is 1f497725fdc793b43aa0d68cff74c58fc/cf:q/1733400387657/Put/seqid=0 2024-12-05T12:06:27,896 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205b93b4365d06349368449cb5828807fc7_d819fc7dca4529788a4ca3a58e3fe9e5 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241205b93b4365d06349368449cb5828807fc7_d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/.tmp/cf/a5f900147a2849d0bf366446de5f6957, store: [table=testExportExpiredSnapshot family=cf region=d819fc7dca4529788a4ca3a58e3fe9e5] 2024-12-05T12:06:27,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/.tmp/cf/a5f900147a2849d0bf366446de5f6957 is 202, key is 04ef7dac19b0e1c9f479549142730714a/cf:q/1733400387656/Put/seqid=0 2024-12-05T12:06:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742265_1441 (size=14661) 2024-12-05T12:06:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742265_1441 (size=14661) 2024-12-05T12:06:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742265_1441 (size=14661) 2024-12-05T12:06:27,904 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp 
file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/.tmp/cf/c243d9ad8e1249449d9436230db0cc19 2024-12-05T12:06:27,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/.tmp/cf/c243d9ad8e1249449d9436230db0cc19 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/cf/c243d9ad8e1249449d9436230db0cc19 2024-12-05T12:06:27,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742266_1442 (size=5888) 2024-12-05T12:06:27,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742266_1442 (size=5888) 2024-12-05T12:06:27,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742266_1442 (size=5888) 2024-12-05T12:06:27,912 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/.tmp/cf/a5f900147a2849d0bf366446de5f6957 2024-12-05T12:06:27,913 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/cf/c243d9ad8e1249449d9436230db0cc19, entries=47, sequenceid=5, filesize=14.3 K 2024-12-05T12:06:27,914 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e2d55a422a8d6cd55d6d7cbfae79e377 in 46ms, sequenceid=5, compaction requested=false 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for e2d55a422a8d6cd55d6d7cbfae79e377: 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. for snapshot-testExportExpiredSnapshot completed. 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/cf/c243d9ad8e1249449d9436230db0cc19] hfiles 2024-12-05T12:06:27,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/cf/c243d9ad8e1249449d9436230db0cc19 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/.tmp/cf/a5f900147a2849d0bf366446de5f6957 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/cf/a5f900147a2849d0bf366446de5f6957 2024-12-05T12:06:27,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742267_1443 (size=103) 2024-12-05T12:06:27,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742267_1443 (size=103) 2024-12-05T12:06:27,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742267_1443 (size=103) 2024-12-05T12:06:27,921 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/cf/a5f900147a2849d0bf366446de5f6957, entries=3, sequenceid=5, filesize=5.8 K 2024-12-05T12:06:27,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 
2024-12-05T12:06:27,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-05T12:06:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-05T12:06:27,921 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d819fc7dca4529788a4ca3a58e3fe9e5 in 53ms, sequenceid=5, compaction requested=false 2024-12-05T12:06:27,921 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for d819fc7dca4529788a4ca3a58e3fe9e5: 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. for snapshot-testExportExpiredSnapshot completed. 2024-12-05T12:06:27,922 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/cf/a5f900147a2849d0bf366446de5f6957] hfiles 2024-12-05T12:06:27,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/cf/a5f900147a2849d0bf366446de5f6957 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2d55a422a8d6cd55d6d7cbfae79e377 in 208 msec 2024-12-05T12:06:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742268_1444 (size=103) 2024-12-05T12:06:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742268_1444 (size=103) 2024-12-05T12:06:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742268_1444 (size=103) 2024-12-05T12:06:27,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
2024-12-05T12:06:27,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-05T12:06:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-05T12:06:27,928 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,928 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-05T12:06:27,930 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:27,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d819fc7dca4529788a4ca3a58e3fe9e5 in 213 msec 2024-12-05T12:06:27,931 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:27,931 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:27,931 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:27,931 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:27,932 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120590e2d48c8c13470eab210a9c9a58fd3b_e2d55a422a8d6cd55d6d7cbfae79e377, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241205b93b4365d06349368449cb5828807fc7_d819fc7dca4529788a4ca3a58e3fe9e5] hfiles 2024-12-05T12:06:27,932 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024120590e2d48c8c13470eab210a9c9a58fd3b_e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:06:27,932 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241205b93b4365d06349368449cb5828807fc7_d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:06:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742269_1445 (size=287) 2024-12-05T12:06:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742269_1445 (size=287) 2024-12-05T12:06:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742269_1445 (size=287) 2024-12-05T12:06:27,939 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:27,939 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,939 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742270_1446 (size=935) 2024-12-05T12:06:27,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742270_1446 (size=935) 2024-12-05T12:06:27,947 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742270_1446 (size=935) 2024-12-05T12:06:27,948 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:27,953 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:27,954 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-05T12:06:27,955 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:27,955 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-05T12:06:27,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 268 msec 2024-12-05T12:06:28,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-05T12:06:28,008 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-05T12:06:29,347 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0007_000001 (auth:SIMPLE) from 127.0.0.1:39586 2024-12-05T12:06:29,358 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000001/launch_container.sh] 2024-12-05T12:06:29,358 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000001/container_tokens] 2024-12-05T12:06:29,358 WARN 
[ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0007/container_1733400137807_0007_01_000001/sysfs] 2024-12-05T12:06:30,540 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:30,568 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-05T12:06:30,568 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-05T12:06:30,568 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-05T12:06:30,568 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-05T12:06:30,568 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-05T12:06:30,568 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-05T12:06:36,070 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:38,015 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400398014 2024-12-05T12:06:38,015 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400398014, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400398014, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:38,040 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:38,040 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400398014, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400398014/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-05T12:06:38,042 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:06:38,043 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:06:38,044 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T12:06:38,048 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400398047"}]},"ts":"1733400398047"} 2024-12-05T12:06:38,049 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-05T12:06:38,049 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-05T12:06:38,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-05T12:06:38,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, UNASSIGN}] 2024-12-05T12:06:38,052 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, UNASSIGN 2024-12-05T12:06:38,053 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, UNASSIGN 2024-12-05T12:06:38,053 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=ef081bb0e17d22aa06240dd7c58dc8f6, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:38,053 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=32910da2d48395a4bc0ab1f392e25ded, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:38,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to 
wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, UNASSIGN because future has completed 2024-12-05T12:06:38,055 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:38,055 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, UNASSIGN because future has completed 2024-12-05T12:06:38,055 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:38,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T12:06:38,207 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:38,207 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:38,207 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing ef081bb0e17d22aa06240dd7c58dc8f6, disabling compactions & flushes 2024-12-05T12:06:38,207 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:38,207 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 2024-12-05T12:06:38,207 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. after waiting 0 ms 2024-12-05T12:06:38,207 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 
2024-12-05T12:06:38,208 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:38,208 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:38,208 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 32910da2d48395a4bc0ab1f392e25ded, disabling compactions & flushes 2024-12-05T12:06:38,208 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:38,208 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:38,208 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. after waiting 0 ms 2024-12-05T12:06:38,208 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:38,211 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6. 
2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for ef081bb0e17d22aa06240dd7c58dc8f6: Waiting for close lock at 1733400398207Running coprocessor pre-close hooks at 1733400398207Disabling compacts and flushes for region at 1733400398207Disabling writes for close at 1733400398207Writing region close event to WAL at 1733400398208 (+1 ms)Running coprocessor post-close hooks at 1733400398211 (+3 ms)Closed at 1733400398211 2024-12-05T12:06:38,211 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded. 2024-12-05T12:06:38,211 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 32910da2d48395a4bc0ab1f392e25ded: Waiting for close lock at 1733400398208Running coprocessor pre-close hooks at 1733400398208Disabling compacts and flushes for region at 1733400398208Disabling writes for close at 1733400398208Writing region close event to WAL at 1733400398209 (+1 ms)Running coprocessor post-close hooks at 1733400398211 (+2 ms)Closed at 1733400398211 2024-12-05T12:06:38,213 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:38,213 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=ef081bb0e17d22aa06240dd7c58dc8f6, regionState=CLOSED 2024-12-05T12:06:38,213 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:38,214 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=32910da2d48395a4bc0ab1f392e25ded, regionState=CLOSED 2024-12-05T12:06:38,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:38,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:38,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-05T12:06:38,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure ef081bb0e17d22aa06240dd7c58dc8f6, server=80666a11a09f,42305,1733400131012 in 160 msec 2024-12-05T12:06:38,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ef081bb0e17d22aa06240dd7c58dc8f6, UNASSIGN in 165 msec 2024-12-05T12:06:38,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-12-05T12:06:38,218 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 32910da2d48395a4bc0ab1f392e25ded, server=80666a11a09f,42913,1733400130845 in 161 msec 2024-12-05T12:06:38,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-12-05T12:06:38,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=32910da2d48395a4bc0ab1f392e25ded, UNASSIGN in 166 msec 2024-12-05T12:06:38,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-05T12:06:38,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 169 msec 2024-12-05T12:06:38,222 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400398222"}]},"ts":"1733400398222"} 2024-12-05T12:06:38,223 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-05T12:06:38,223 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-05T12:06:38,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 180 msec 2024-12-05T12:06:38,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-05T12:06:38,367 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T12:06:38,368 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,369 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,370 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,372 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,373 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:38,373 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:38,374 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/recovered.edits] 2024-12-05T12:06:38,374 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/recovered.edits] 2024-12-05T12:06:38,377 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/cf/5597b8b25d824f918f0ead5cd13aabcc 2024-12-05T12:06:38,377 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/cf/51c879091292488584c9b95e0ab2ff83 2024-12-05T12:06:38,379 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6/recovered.edits/9.seqid 2024-12-05T12:06:38,379 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded/recovered.edits/9.seqid 2024-12-05T12:06:38,379 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:38,379 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportExpiredSnapshot/32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:38,379 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-05T12:06:38,379 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-05T12:06:38,380 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-05T12:06:38,382 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241205de66f8371bcc488b8e00e024707f0dde_ef081bb0e17d22aa06240dd7c58dc8f6 2024-12-05T12:06:38,384 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241205ec385a3eaad84b97800c0045f7afa259_32910da2d48395a4bc0ab1f392e25ded 2024-12-05T12:06:38,384 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-05T12:06:38,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,387 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-05T12:06:38,389 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 
2024-12-05T12:06:38,390 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,390 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-05T12:06:38,390 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400398390"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:38,390 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400398390"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:38,392 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:06:38,392 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 32910da2d48395a4bc0ab1f392e25ded, NAME => 'testtb-testExportExpiredSnapshot,,1733400385656.32910da2d48395a4bc0ab1f392e25ded.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ef081bb0e17d22aa06240dd7c58dc8f6, NAME => 'testtb-testExportExpiredSnapshot,1,1733400385656.ef081bb0e17d22aa06240dd7c58dc8f6.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:06:38,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-05T12:06:38,392 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400398392"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:38,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-05T12:06:38,394 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 26 msec 2024-12-05T12:06:38,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,419 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T12:06:38,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T12:06:38,419 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T12:06:38,420 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,427 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,427 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-05T12:06:38,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,428 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportExpiredSnapshot 2024-12-05T12:06:38,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,428 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,428 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-05T12:06:38,429 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,434 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-05T12:06:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-05T12:06:38,437 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-05T12:06:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-05T12:06:38,440 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-05T12:06:38,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-05T12:06:38,461 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=811 (was 819), OpenFileDescriptor=793 (was 825), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=661 (was 735), ProcessCount=14 (was 14), AvailableMemoryMB=2759 (was 2821) 2024-12-05T12:06:38,461 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-05T12:06:38,477 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=811, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=661, ProcessCount=14, AvailableMemoryMB=2759 2024-12-05T12:06:38,477 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-05T12:06:38,478 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:38,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:38,480 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:38,481 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-05T12:06:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T12:06:38,481 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:38,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742271_1447 (size=448) 2024-12-05T12:06:38,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742271_1447 (size=448) 2024-12-05T12:06:38,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742271_1447 (size=448) 2024-12-05T12:06:38,492 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 84f09b7b3f85c4035afdd6af15adcc69, NAME => 'testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:38,492 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9cf00a5e9699ea73047ff1d5c1df4943, NAME => 'testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742272_1448 (size=73) 2024-12-05T12:06:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742273_1449 (size=73) 2024-12-05T12:06:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742272_1448 (size=73) 2024-12-05T12:06:38,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742272_1448 (size=73) 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 9cf00a5e9699ea73047ff1d5c1df4943, disabling compactions & flushes 2024-12-05T12:06:38,499 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. after waiting 0 ms 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:38,499 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 
2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 84f09b7b3f85c4035afdd6af15adcc69, disabling compactions & flushes 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9cf00a5e9699ea73047ff1d5c1df4943: Waiting for close lock at 1733400398499Disabling compacts and flushes for region at 1733400398499Disabling writes for close at 1733400398499Writing region close event to WAL at 1733400398499Closed at 1733400398499 2024-12-05T12:06:38,499 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. after waiting 0 ms 2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:38,499 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 
2024-12-05T12:06:38,499 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 84f09b7b3f85c4035afdd6af15adcc69: Waiting for close lock at 1733400398499Disabling compacts and flushes for region at 1733400398499Disabling writes for close at 1733400398499Writing region close event to WAL at 1733400398499Closed at 1733400398499 2024-12-05T12:06:38,500 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:38,500 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733400398500"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400398500"}]},"ts":"1733400398500"} 2024-12-05T12:06:38,500 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733400398500"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400398500"}]},"ts":"1733400398500"} 2024-12-05T12:06:38,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742273_1449 (size=73) 2024-12-05T12:06:38,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742273_1449 (size=73) 2024-12-05T12:06:38,502 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-05T12:06:38,503 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:38,503 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400398503"}]},"ts":"1733400398503"} 2024-12-05T12:06:38,504 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-05T12:06:38,504 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:38,505 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:38,505 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:38,505 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:38,505 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:38,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, ASSIGN}] 2024-12-05T12:06:38,506 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, ASSIGN 2024-12-05T12:06:38,506 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, ASSIGN 2024-12-05T12:06:38,507 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:38,507 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:38,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T12:06:38,657 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:06:38,658 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=9cf00a5e9699ea73047ff1d5c1df4943, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:38,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=84f09b7b3f85c4035afdd6af15adcc69, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:38,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, ASSIGN because future has completed 2024-12-05T12:06:38,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:38,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, ASSIGN because future has completed 2024-12-05T12:06:38,660 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:38,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T12:06:38,813 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:38,813 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 
2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 9cf00a5e9699ea73047ff1d5c1df4943, NAME => 'testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 84f09b7b3f85c4035afdd6af15adcc69, NAME => 'testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. service=AccessControlService 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. service=AccessControlService 2024-12-05T12:06:38,814 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:38,814 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 
{event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,814 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,816 INFO [StoreOpener-84f09b7b3f85c4035afdd6af15adcc69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,816 INFO [StoreOpener-9cf00a5e9699ea73047ff1d5c1df4943-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,817 INFO [StoreOpener-9cf00a5e9699ea73047ff1d5c1df4943-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cf00a5e9699ea73047ff1d5c1df4943 columnFamilyName cf 2024-12-05T12:06:38,817 INFO [StoreOpener-84f09b7b3f85c4035afdd6af15adcc69-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84f09b7b3f85c4035afdd6af15adcc69 columnFamilyName cf 2024-12-05T12:06:38,818 DEBUG [StoreOpener-9cf00a5e9699ea73047ff1d5c1df4943-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:38,818 INFO [StoreOpener-9cf00a5e9699ea73047ff1d5c1df4943-1 {}] regionserver.HStore(327): Store=9cf00a5e9699ea73047ff1d5c1df4943/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:38,819 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,819 DEBUG [StoreOpener-84f09b7b3f85c4035afdd6af15adcc69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:38,819 INFO [StoreOpener-84f09b7b3f85c4035afdd6af15adcc69-1 {}] regionserver.HStore(327): Store=84f09b7b3f85c4035afdd6af15adcc69/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:38,819 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,820 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,820 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,820 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,820 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,820 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,821 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,821 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,821 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,821 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,822 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 
2024-12-05T12:06:38,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:38,824 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 9cf00a5e9699ea73047ff1d5c1df4943; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62374927, jitterRate=-0.07054115831851959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:38,824 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:38,825 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 84f09b7b3f85c4035afdd6af15adcc69; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71494525, jitterRate=0.0653514415025711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:38,825 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:38,825 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 84f09b7b3f85c4035afdd6af15adcc69: Running coprocessor pre-open hook at 1733400398814Writing region info on filesystem at 1733400398814Initializing all the Stores at 1733400398815 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400398815Cleaning up temporary data from old regions at 1733400398821 (+6 ms)Running coprocessor post-open hooks at 1733400398825 (+4 ms)Region opened successfully at 1733400398825 2024-12-05T12:06:38,825 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 9cf00a5e9699ea73047ff1d5c1df4943: Running coprocessor pre-open hook at 1733400398814Writing region info on filesystem at 1733400398814Initializing all the Stores at 1733400398815 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400398815Cleaning up temporary data from old regions at 1733400398820 (+5 ms)Running coprocessor post-open hooks at 1733400398824 (+4 ms)Region opened successfully at 1733400398825 (+1 ms) 
2024-12-05T12:06:38,826 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69., pid=199, masterSystemTime=1733400398811 2024-12-05T12:06:38,826 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943., pid=198, masterSystemTime=1733400398811 2024-12-05T12:06:38,827 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:38,827 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:38,828 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=84f09b7b3f85c4035afdd6af15adcc69, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:38,828 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:38,828 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 
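
At this point both OpenRegionProcedures have reported back and the regions are being marked OPEN in hbase:meta; the test itself then logs "Found 2 regions for table ...". A minimal client-side sketch of an equivalent check, assuming an Admin handle obtained as in the previous sketch (the class and method names are illustrative):

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class RegionCheckSketch {
      // Sketch: confirm the table is serving and list its regions with their key ranges.
      static void verifyRegions(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        if (!admin.isTableAvailable(table)) {
          throw new IllegalStateException("table is not yet available");
        }
        List<RegionInfo> regions = admin.getRegions(table);
        for (RegionInfo region : regions) {
          System.out.println(region.getEncodedName()
              + " [" + Bytes.toStringBinary(region.getStartKey())
              + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
        }
      }
    }
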
2024-12-05T12:06:38,828 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=9cf00a5e9699ea73047ff1d5c1df4943, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:38,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:38,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:38,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-05T12:06:38,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012 in 171 msec 2024-12-05T12:06:38,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-05T12:06:38,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845 in 172 msec 2024-12-05T12:06:38,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, ASSIGN in 327 msec 2024-12-05T12:06:38,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-05T12:06:38,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, ASSIGN in 328 msec 2024-12-05T12:06:38,834 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:38,835 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400398834"}]},"ts":"1733400398834"} 2024-12-05T12:06:38,836 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-05T12:06:38,838 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:38,839 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-05T12:06:38,841 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T12:06:38,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,885 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,895 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:38,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 416 msec 2024-12-05T12:06:38,982 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:06:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-05T12:06:39,108 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T12:06:39,108 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,110 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-05T12:06:39,110 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:39,110 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:39,112 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,116 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,121 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400399123 (current time:1733400399123). 
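
The snapshot request logged above, { ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, corresponds to a single Admin call on the client side. A hedged sketch, reusing the connection setup assumed earlier:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotSketch {
      // Sketch: request a FLUSH-type snapshot; the call returns once the master's
      // SnapshotProcedure (pid=200 later in this log) reports completion.
      static void takeEmptySnapshot(Admin admin) throws Exception {
        admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
            TableName.valueOf("testtb-testEmptyExportFileSystemState"),
            SnapshotType.FLUSH);
      }
    }
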
2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@201b5b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:39,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:39,124 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:39,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:39,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:39,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b7a16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:39,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:39,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,126 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:39,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a0d92a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:39,127 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:39,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,128 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:39,129 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:39,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:39,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,129 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:06:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d28f51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:39,130 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438358ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,132 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:39,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e70d84b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:39,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:39,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,135 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49052, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:06:39,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:39,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,137 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:39,138 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:06:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:39,138 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,139 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T12:06:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:06:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:06:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-05T12:06:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T12:06:39,141 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:39,142 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:39,145 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:39,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742274_1450 (size=185) 2024-12-05T12:06:39,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742274_1450 (size=185) 2024-12-05T12:06:39,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742274_1450 (size=185) 2024-12-05T12:06:39,151 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:39,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943}] 2024-12-05T12:06:39,152 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,152 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T12:06:39,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-05T12:06:39,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 9cf00a5e9699ea73047ff1d5c1df4943: 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 84f09b7b3f85c4035afdd6af15adcc69: 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:39,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:39,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:39,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742276_1452 (size=76) 2024-12-05T12:06:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742275_1451 (size=76) 2024-12-05T12:06:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742276_1452 (size=76) 2024-12-05T12:06:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742275_1451 (size=76) 2024-12-05T12:06:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742276_1452 (size=76) 2024-12-05T12:06:39,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:39,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-05T12:06:39,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742275_1451 (size=76) 2024-12-05T12:06:39,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 
2024-12-05T12:06:39,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-05T12:06:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-05T12:06:39,310 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-05T12:06:39,310 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,310 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,312 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69 in 160 msec 2024-12-05T12:06:39,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-12-05T12:06:39,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 in 160 msec 2024-12-05T12:06:39,313 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:39,313 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:39,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:39,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:39,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:39,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:06:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742277_1453 (size=68) 2024-12-05T12:06:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742277_1453 (size=68) 2024-12-05T12:06:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742277_1453 (size=68) 2024-12-05T12:06:39,320 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:39,320 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,321 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742278_1454 (size=673) 2024-12-05T12:06:39,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742278_1454 (size=673) 2024-12-05T12:06:39,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742278_1454 (size=673) 2024-12-05T12:06:39,329 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:39,335 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:39,335 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,337 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:39,337 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-05T12:06:39,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 198 msec 2024-12-05T12:06:39,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-05T12:06:39,458 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T12:06:39,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:39,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:39,465 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-05T12:06:39,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 
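
The two "writing data to region ... with WAL disabled" warnings above come from puts issued with durability SKIP_WAL. A hedged sketch of such a write; only the table and family names come from the log, while the row key, qualifier, and value are illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SkipWalPutSketch {
      // Sketch: a put with the write-ahead log skipped; this is what makes the
      // region server warn "Data may be lost in the event of a crash".
      static void putWithoutWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(
            TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"));                // illustrative row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // 'cf' from the log; 'q' illustrative
              Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                  // bypass the WAL
          table.put(put);
        }
      }
    }
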
2024-12-05T12:06:39,467 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:39,469 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,474 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,481 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-05T12:06:39,484 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:06:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400399484 (current time:1733400399484). 2024-12-05T12:06:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-05T12:06:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5efc0666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:39,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:39,486 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:39,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:39,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:39,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67ead820, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:39,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:39,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,487 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:39,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f30964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:39,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:39,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,490 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:39,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:06:39,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:39,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,492 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ee43c19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:39,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:39,493 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:39,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:39,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:39,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4493678c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:39,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:39,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,494 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:39,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1665bad6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:39,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:39,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:39,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49078, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:39,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:39,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:39,499 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:39,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:06:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-05T12:06:39,501 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T12:06:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-05T12:06:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-05T12:06:39,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T12:06:39,503 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:39,504 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:39,506 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742279_1455 (size=180) 2024-12-05T12:06:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742279_1455 (size=180) 2024-12-05T12:06:39,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742279_1455 (size=180) 2024-12-05T12:06:39,514 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:39,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943}] 2024-12-05T12:06:39,515 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,516 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T12:06:39,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-05T12:06:39,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-05T12:06:39,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:39,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:39,668 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 84f09b7b3f85c4035afdd6af15adcc69 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-05T12:06:39,668 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 9cf00a5e9699ea73047ff1d5c1df4943 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-05T12:06:39,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 is 69, key is 083e40bd853b64d46db7d92b2abf00005/cf:q/1733400399463/Put/seqid=0 2024-12-05T12:06:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742280_1456 (size=4964) 2024-12-05T12:06:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742280_1456 (size=4964) 2024-12-05T12:06:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742280_1456 (size=4964) 2024-12-05T12:06:39,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 is 71, key is 11be48896afefddc6dca7675c5b8f47b/cf:q/1733400399464/Put/seqid=0 2024-12-05T12:06:39,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:39,694 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/.tmp/cf/c4109f5516c344c4b3494b5c68f3b790, store: [table=testtb-testEmptyExportFileSystemState family=cf region=84f09b7b3f85c4035afdd6af15adcc69] 2024-12-05T12:06:39,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/.tmp/cf/c4109f5516c344c4b3494b5c68f3b790 is 214, key is 083e40bd853b64d46db7d92b2abf00005/cf:q/1733400399463/Put/seqid=0 2024-12-05T12:06:39,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742281_1457 (size=8312) 2024-12-05T12:06:39,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742281_1457 (size=8312) 2024-12-05T12:06:39,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742281_1457 (size=8312) 2024-12-05T12:06:39,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:39,703 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/.tmp/cf/c2844d189a184ec19f9bf83ceb3b27c3, store: [table=testtb-testEmptyExportFileSystemState family=cf 
region=9cf00a5e9699ea73047ff1d5c1df4943] 2024-12-05T12:06:39,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/.tmp/cf/c2844d189a184ec19f9bf83ceb3b27c3 is 214, key is 10a98d9bf806862756ea41366ed6bf11b/cf:q/1733400399464/Put/seqid=0 2024-12-05T12:06:39,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742282_1458 (size=5514) 2024-12-05T12:06:39,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742282_1458 (size=5514) 2024-12-05T12:06:39,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742282_1458 (size=5514) 2024-12-05T12:06:39,709 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=65, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/.tmp/cf/c4109f5516c344c4b3494b5c68f3b790 2024-12-05T12:06:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742283_1459 (size=15659) 2024-12-05T12:06:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742283_1459 (size=15659) 2024-12-05T12:06:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742283_1459 (size=15659) 2024-12-05T12:06:39,710 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/.tmp/cf/c2844d189a184ec19f9bf83ceb3b27c3 2024-12-05T12:06:39,713 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/.tmp/cf/c4109f5516c344c4b3494b5c68f3b790 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790 2024-12-05T12:06:39,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/.tmp/cf/c2844d189a184ec19f9bf83ceb3b27c3 as 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3 2024-12-05T12:06:39,717 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790, entries=1, sequenceid=6, filesize=5.4 K 2024-12-05T12:06:39,718 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 84f09b7b3f85c4035afdd6af15adcc69 in 50ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:39,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 84f09b7b3f85c4035afdd6af15adcc69: 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790] hfiles 2024-12-05T12:06:39,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,720 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3, entries=49, sequenceid=6, filesize=15.3 K 2024-12-05T12:06:39,721 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 9cf00a5e9699ea73047ff1d5c1df4943 in 53ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 9cf00a5e9699ea73047ff1d5c1df4943: 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3] hfiles 2024-12-05T12:06:39,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742284_1460 (size=115) 2024-12-05T12:06:39,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742284_1460 (size=115) 2024-12-05T12:06:39,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742284_1460 (size=115) 2024-12-05T12:06:39,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:39,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-05T12:06:39,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-05T12:06:39,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,727 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742285_1461 (size=115) 2024-12-05T12:06:39,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742285_1461 (size=115) 2024-12-05T12:06:39,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742285_1461 (size=115) 2024-12-05T12:06:39,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 
2024-12-05T12:06:39,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-05T12:06:39,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-05T12:06:39,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,729 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69 in 214 msec 2024-12-05T12:06:39,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-05T12:06:39,731 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:39,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943 in 215 msec 2024-12-05T12:06:39,731 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:39,732 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:39,732 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:39,733 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:39,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69] hfiles 2024-12-05T12:06:39,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:39,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742286_1462 (size=299) 2024-12-05T12:06:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742286_1462 (size=299) 2024-12-05T12:06:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742286_1462 (size=299) 2024-12-05T12:06:39,741 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:39,741 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,741 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742287_1463 (size=983) 2024-12-05T12:06:39,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742287_1463 (size=983) 
2024-12-05T12:06:39,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742287_1463 (size=983) 2024-12-05T12:06:39,751 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:39,756 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:39,756 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,757 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:39,758 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-05T12:06:39,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 257 msec 2024-12-05T12:06:39,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-05T12:06:39,818 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T12:06:39,818 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818 2024-12-05T12:06:39,818 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:39,844 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, 
inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:39,844 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,845 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:06:39,849 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:39,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742289_1465 (size=673) 2024-12-05T12:06:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742289_1465 (size=673) 2024-12-05T12:06:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742288_1464 (size=185) 2024-12-05T12:06:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742288_1464 (size=185) 2024-12-05T12:06:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742288_1464 (size=185) 2024-12-05T12:06:39,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742289_1465 (size=673) 2024-12-05T12:06:39,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:39,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:39,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,360 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-05T12:06:40,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on 
a single HBase RegionServer 2024-12-05T12:06:40,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-05T12:06:40,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-11278833125078312177.jar 2024-12-05T12:06:40,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-9290893087746872857.jar 2024-12-05T12:06:40,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:40,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:06:40,783 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:06:40,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:06:40,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:06:40,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:06:40,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:06:40,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:06:40,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:06:40,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:06:40,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:06:40,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:06:40,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:40,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:40,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:40,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:40,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:40,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:40,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:40,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742290_1466 (size=131440) 2024-12-05T12:06:40,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742290_1466 (size=131440) 2024-12-05T12:06:40,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742290_1466 (size=131440) 2024-12-05T12:06:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742291_1467 (size=4188619) 2024-12-05T12:06:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742291_1467 (size=4188619) 2024-12-05T12:06:40,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742291_1467 (size=4188619) 2024-12-05T12:06:40,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742292_1468 (size=1323991) 2024-12-05T12:06:40,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742292_1468 (size=1323991) 2024-12-05T12:06:40,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742292_1468 (size=1323991) 2024-12-05T12:06:40,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742293_1469 (size=903934) 2024-12-05T12:06:40,878 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742293_1469 (size=903934) 2024-12-05T12:06:40,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742293_1469 (size=903934) 2024-12-05T12:06:40,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742294_1470 (size=8360360) 2024-12-05T12:06:40,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742294_1470 (size=8360360) 2024-12-05T12:06:40,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742294_1470 (size=8360360) 2024-12-05T12:06:40,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742295_1471 (size=1877034) 2024-12-05T12:06:40,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742295_1471 (size=1877034) 2024-12-05T12:06:40,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742295_1471 (size=1877034) 2024-12-05T12:06:40,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742296_1472 (size=77835) 2024-12-05T12:06:40,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742296_1472 (size=77835) 2024-12-05T12:06:40,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742296_1472 (size=77835) 2024-12-05T12:06:40,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742297_1473 (size=6425023) 2024-12-05T12:06:40,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742297_1473 (size=6425023) 2024-12-05T12:06:40,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742297_1473 (size=6425023) 2024-12-05T12:06:40,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742298_1474 (size=30949) 2024-12-05T12:06:40,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742298_1474 (size=30949) 2024-12-05T12:06:40,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742298_1474 (size=30949) 2024-12-05T12:06:40,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742299_1475 (size=1597213) 2024-12-05T12:06:40,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742299_1475 (size=1597213) 2024-12-05T12:06:40,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742299_1475 (size=1597213) 2024-12-05T12:06:40,985 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742300_1476 (size=4695811) 2024-12-05T12:06:40,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742300_1476 (size=4695811) 2024-12-05T12:06:40,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742300_1476 (size=4695811) 2024-12-05T12:06:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742301_1477 (size=232957) 2024-12-05T12:06:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742301_1477 (size=232957) 2024-12-05T12:06:40,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742301_1477 (size=232957) 2024-12-05T12:06:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742302_1478 (size=127628) 2024-12-05T12:06:40,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742302_1478 (size=127628) 2024-12-05T12:06:41,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742302_1478 (size=127628) 2024-12-05T12:06:41,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742303_1479 (size=20406) 2024-12-05T12:06:41,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742303_1479 (size=20406) 2024-12-05T12:06:41,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742303_1479 (size=20406) 2024-12-05T12:06:41,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742304_1480 (size=5175431) 2024-12-05T12:06:41,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742304_1480 (size=5175431) 2024-12-05T12:06:41,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742304_1480 (size=5175431) 2024-12-05T12:06:41,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742305_1481 (size=217634) 2024-12-05T12:06:41,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742305_1481 (size=217634) 2024-12-05T12:06:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742305_1481 (size=217634) 2024-12-05T12:06:41,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742306_1482 (size=1832290) 2024-12-05T12:06:41,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742306_1482 (size=1832290) 2024-12-05T12:06:41,038 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742306_1482 (size=1832290) 2024-12-05T12:06:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742307_1483 (size=322274) 2024-12-05T12:06:41,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742307_1483 (size=322274) 2024-12-05T12:06:41,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742307_1483 (size=322274) 2024-12-05T12:06:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742308_1484 (size=503880) 2024-12-05T12:06:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742308_1484 (size=503880) 2024-12-05T12:06:41,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742308_1484 (size=503880) 2024-12-05T12:06:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742309_1485 (size=29229) 2024-12-05T12:06:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742309_1485 (size=29229) 2024-12-05T12:06:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742309_1485 (size=29229) 2024-12-05T12:06:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742310_1486 (size=24096) 2024-12-05T12:06:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742310_1486 (size=24096) 2024-12-05T12:06:41,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742310_1486 (size=24096) 2024-12-05T12:06:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742311_1487 (size=111872) 2024-12-05T12:06:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742311_1487 (size=111872) 2024-12-05T12:06:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742311_1487 (size=111872) 2024-12-05T12:06:41,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742312_1488 (size=443172) 2024-12-05T12:06:41,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742312_1488 (size=443172) 2024-12-05T12:06:41,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742312_1488 (size=443172) 2024-12-05T12:06:41,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742313_1489 (size=45609) 2024-12-05T12:06:41,107 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742313_1489 (size=45609) 2024-12-05T12:06:41,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742313_1489 (size=45609) 2024-12-05T12:06:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742314_1490 (size=136454) 2024-12-05T12:06:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742314_1490 (size=136454) 2024-12-05T12:06:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742314_1490 (size=136454) 2024-12-05T12:06:41,119 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T12:06:41,121 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-05T12:06:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742315_1491 (size=7) 2024-12-05T12:06:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742315_1491 (size=7) 2024-12-05T12:06:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742315_1491 (size=7) 2024-12-05T12:06:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742316_1492 (size=10) 2024-12-05T12:06:41,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742316_1492 (size=10) 2024-12-05T12:06:41,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742316_1492 (size=10) 2024-12-05T12:06:41,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742317_1493 (size=303901) 2024-12-05T12:06:41,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742317_1493 (size=303901) 2024-12-05T12:06:41,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742317_1493 (size=303901) 2024-12-05T12:06:41,180 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:06:41,180 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:06:41,255 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0008_000001 (auth:SIMPLE) from 127.0.0.1:35274 2024-12-05T12:06:42,745 INFO [AsyncFSWAL-0-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965 {}] wal.AbstractFSWAL(1368): Slow sync cost: 129 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39603,DS-3b5287fb-7177-4c9b-b36f-a90d42a8b13c,DISK], DatanodeInfoWithStorage[127.0.0.1:35565,DS-629f3698-fec0-408e-9eb6-41b26b8f31e2,DISK], DatanodeInfoWithStorage[127.0.0.1:37713,DS-ee047669-e603-4685-b719-a5ac8b615655,DISK]] 2024-12-05T12:06:43,713 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:46,481 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0008_000001 (auth:SIMPLE) from 127.0.0.1:32776 2024-12-05T12:06:46,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742318_1494 (size=349575) 2024-12-05T12:06:46,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742318_1494 (size=349575) 2024-12-05T12:06:46,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742318_1494 (size=349575) 2024-12-05T12:06:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742319_1495 (size=8568) 2024-12-05T12:06:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742319_1495 (size=8568) 2024-12-05T12:06:47,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742319_1495 (size=8568) 2024-12-05T12:06:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742320_1496 (size=460) 2024-12-05T12:06:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742320_1496 (size=460) 2024-12-05T12:06:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742320_1496 (size=460) 2024-12-05T12:06:47,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742321_1497 (size=8568) 2024-12-05T12:06:47,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742321_1497 (size=8568) 2024-12-05T12:06:47,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742321_1497 (size=8568) 2024-12-05T12:06:47,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742322_1498 (size=349575) 2024-12-05T12:06:47,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is 
added to blk_1073742322_1498 (size=349575) 2024-12-05T12:06:47,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742322_1498 (size=349575) 2024-12-05T12:06:49,292 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:06:49,293 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:06:49,298 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:49,298 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:06:49,299 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:06:49,299 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:49,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-05T12:06:49,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-05T12:06:49,300 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:49,301 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-05T12:06:49,301 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400399818/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-05T12:06:49,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 
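For reference, the export that the test just verified ("Export Completed: emptySnaptb0-testEmptyExportFileSystemState") corresponds to running the ExportSnapshot tool against the snapshot named in the log. The sketch below drives it programmatically via ToolRunner; it is only an illustration — the destination URI is a shortened, hypothetical stand-in for the export-test directory used by the test, and the single-dash option names follow the commonly documented CLI usage, which can vary between HBase versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name as it appears in the log; the copy-to root is a
    // placeholder for any HDFS directory the exporting user can write to.
    String[] toolArgs = {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:37541/user/jenkins/export-target"
    };
    int rc = ToolRunner.run(conf, new ExportSnapshot(), toolArgs);
    System.exit(rc);
  }
}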
2024-12-05T12:06:49,310 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400409310"}]},"ts":"1733400409310"} 2024-12-05T12:06:49,313 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-05T12:06:49,313 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-05T12:06:49,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-05T12:06:49,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, UNASSIGN}] 2024-12-05T12:06:49,319 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, UNASSIGN 2024-12-05T12:06:49,319 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, UNASSIGN 2024-12-05T12:06:49,320 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=84f09b7b3f85c4035afdd6af15adcc69, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:49,320 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=9cf00a5e9699ea73047ff1d5c1df4943, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:49,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, UNASSIGN because future has completed 2024-12-05T12:06:49,322 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:49,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:49,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, UNASSIGN because future has completed 
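The disable request recorded above (pid=206 with its CloseTableRegions and UNASSIGN children) is what a plain Admin.disableTable call produces on the client side. A minimal sketch, assuming a reachable cluster configured through an hbase-site.xml on the classpath and the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocking form: returns once the master's DisableTableProcedure finishes.
      admin.disableTable(table);
      // Non-blocking alternative; the repeated "Checking to see if procedure is
      // done pid=206" entries in the log are the client waiting on this future:
      // admin.disableTableAsync(table).get();
    }
  }
}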
2024-12-05T12:06:49,323 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:06:49,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:49,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-05T12:06:49,473 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 9cf00a5e9699ea73047ff1d5c1df4943, disabling compactions & flushes 2024-12-05T12:06:49,474 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. after waiting 0 ms 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:49,474 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:06:49,474 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 84f09b7b3f85c4035afdd6af15adcc69, disabling compactions & flushes 2024-12-05T12:06:49,475 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:49,475 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 
2024-12-05T12:06:49,475 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. after waiting 0 ms 2024-12-05T12:06:49,475 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 2024-12-05T12:06:49,478 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:49,478 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:49,478 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943. 2024-12-05T12:06:49,478 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 9cf00a5e9699ea73047ff1d5c1df4943: Waiting for close lock at 1733400409474Running coprocessor pre-close hooks at 1733400409474Disabling compacts and flushes for region at 1733400409474Disabling writes for close at 1733400409474Writing region close event to WAL at 1733400409475 (+1 ms)Running coprocessor post-close hooks at 1733400409478 (+3 ms)Closed at 1733400409478 2024-12-05T12:06:49,480 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:49,481 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=9cf00a5e9699ea73047ff1d5c1df4943, regionState=CLOSED 2024-12-05T12:06:49,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:49,490 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:06:49,491 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:06:49,491 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69. 
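With both regions reporting closed above, the table is about to be flipped to DISABLED in hbase:meta. A client that needs to gate follow-up work (such as the delete further down) on that state can poll the Admin API; a small helper sketch, with the Admin instance and timeout policy left to the caller:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForDisabled {
  private WaitForDisabled() {}

  // Polls until the table reports DISABLED or the deadline passes.
  public static boolean waitForDisabled(Admin admin, TableName table, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (admin.isTableDisabled(table)) {
        return true;
      }
      Thread.sleep(200);
    }
    return admin.isTableDisabled(table);
  }
}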
2024-12-05T12:06:49,491 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 84f09b7b3f85c4035afdd6af15adcc69: Waiting for close lock at 1733400409474Running coprocessor pre-close hooks at 1733400409474Disabling compacts and flushes for region at 1733400409474Disabling writes for close at 1733400409475 (+1 ms)Writing region close event to WAL at 1733400409476 (+1 ms)Running coprocessor post-close hooks at 1733400409491 (+15 ms)Closed at 1733400409491 2024-12-05T12:06:49,496 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:49,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-05T12:06:49,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 9cf00a5e9699ea73047ff1d5c1df4943, server=80666a11a09f,42913,1733400130845 in 166 msec 2024-12-05T12:06:49,497 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=84f09b7b3f85c4035afdd6af15adcc69, regionState=CLOSED 2024-12-05T12:06:49,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=9cf00a5e9699ea73047ff1d5c1df4943, UNASSIGN in 180 msec 2024-12-05T12:06:49,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:49,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-12-05T12:06:49,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 84f09b7b3f85c4035afdd6af15adcc69, server=80666a11a09f,42305,1733400131012 in 176 msec 2024-12-05T12:06:49,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-12-05T12:06:49,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=84f09b7b3f85c4035afdd6af15adcc69, UNASSIGN in 184 msec 2024-12-05T12:06:49,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-05T12:06:49,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 190 msec 2024-12-05T12:06:49,507 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400409507"}]},"ts":"1733400409507"} 2024-12-05T12:06:49,508 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-05T12:06:49,508 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set 
testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-05T12:06:49,510 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 202 msec 2024-12-05T12:06:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-05T12:06:49,628 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T12:06:49,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,634 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,638 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,642 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:49,642 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:49,643 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/recovered.edits] 2024-12-05T12:06:49,643 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/recovered.edits] 2024-12-05T12:06:49,651 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from 
FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/cf/c2844d189a184ec19f9bf83ceb3b27c3 2024-12-05T12:06:49,651 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/cf/c4109f5516c344c4b3494b5c68f3b790 2024-12-05T12:06:49,655 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943/recovered.edits/9.seqid 2024-12-05T12:06:49,655 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69/recovered.edits/9.seqid 2024-12-05T12:06:49,656 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:49,656 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testEmptyExportFileSystemState/84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:49,656 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-05T12:06:49,657 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-05T12:06:49,658 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-05T12:06:49,662 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 to 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241205092a08a807e843f0997ae37b4438260d_9cf00a5e9699ea73047ff1d5c1df4943 2024-12-05T12:06:49,663 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241205fa4033e8c57144fc9aa213e0b2fd25ba_84f09b7b3f85c4035afdd6af15adcc69 2024-12-05T12:06:49,664 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-05T12:06:49,666 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,669 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-05T12:06:49,672 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-05T12:06:49,673 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,673 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-05T12:06:49,673 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400409673"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:49,674 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400409673"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:49,676 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:06:49,676 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 84f09b7b3f85c4035afdd6af15adcc69, NAME => 'testtb-testEmptyExportFileSystemState,,1733400398478.84f09b7b3f85c4035afdd6af15adcc69.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9cf00a5e9699ea73047ff1d5c1df4943, NAME => 'testtb-testEmptyExportFileSystemState,1,1733400398478.9cf00a5e9699ea73047ff1d5c1df4943.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:06:49,676 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
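The HFileArchiver moves and META cleanup above are driven by the client's delete request, and the test then drops its snapshots as well (the "delete name: ..." requests a few entries below). A minimal cleanup sketch under the same assumptions as before, using the table and snapshot names from the log; note that archived hfiles stay under the archive directory until the cleaner chores reclaim them once no snapshot references remain.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table) && admin.isTableDisabled(table)) {
        // Triggers the DeleteTableProcedure: region and mob files are archived
        // under the cluster's archive directory before META rows are removed.
        admin.deleteTable(table);
      }
      // Snapshots are deleted separately from the table itself.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}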
2024-12-05T12:06:49,676 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400409676"}]},"ts":"9223372036854775807"} 2024-12-05T12:06:49,678 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-05T12:06:49,679 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 50 msec 2024-12-05T12:06:49,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,693 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T12:06:49,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T12:06:49,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T12:06:49,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-05T12:06:49,701 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:49,701 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:49,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:49,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-05T12:06:49,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:49,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:49,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:49,703 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-05T12:06:49,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-05T12:06:49,703 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:49,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-05T12:06:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:49,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-05T12:06:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-05T12:06:49,731 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=822 (was 811) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1316414641_1 at /127.0.0.1:47980 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1316414641_1 at /127.0.0.1:51370 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:45860 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:38433 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38433 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7435 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33767 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:52212 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:41694 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:33767 from appattempt_1733400137807_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 152824) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=833 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=638 (was 661), ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=2272 (was 2759) 2024-12-05T12:06:49,731 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=822 is superior to 500 2024-12-05T12:06:49,748 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=822, OpenFileDescriptor=833, MaxFileDescriptor=1048576, SystemLoadAverage=638, ProcessCount=17, AvailableMemoryMB=2270 2024-12-05T12:06:49,748 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=822 is superior to 500 2024-12-05T12:06:49,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:06:49,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:06:49,751 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:06:49,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for 
creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-05T12:06:49,752 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-05T12:06:49,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T12:06:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742323_1499 (size=440) 2024-12-05T12:06:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742323_1499 (size=440) 2024-12-05T12:06:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742323_1499 (size=440) 2024-12-05T12:06:49,760 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ac3d10a1d44d11ee31615fff4b4a5947, NAME => 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:49,760 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5c1512f0bac1eb033bb472e2b4a64235, NAME => 'testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742324_1500 (size=65) 2024-12-05T12:06:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742324_1500 (size=65) 2024-12-05T12:06:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742324_1500 (size=65) 2024-12-05T12:06:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742325_1501 (size=65) 
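Illustration (not part of the captured log): the entries above show the master running CreateTableProcedure pid=213 for 'testtb-testExportWithChecksum' with a single MOB-enabled family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at '1'. A minimal client-side sketch that would issue a comparable request through the public Admin API is given below; the connection bootstrap and the split key are assumptions for illustration, not values recovered from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor printed above: family 'cf', MOB enabled with threshold 0,
          // a single version, default compression/encoding/block size.
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)
                  .setMaxVersions(1)
                  .build());
          // One split key at '1' yields the two regions ('' -> '1' and '1' -> '') created above.
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }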
2024-12-05T12:06:49,765 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:49,765 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 5c1512f0bac1eb033bb472e2b4a64235, disabling compactions & flushes 2024-12-05T12:06:49,765 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742325_1501 (size=65) 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. after waiting 0 ms 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:49,766 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5c1512f0bac1eb033bb472e2b4a64235: Waiting for close lock at 1733400409765Disabling compacts and flushes for region at 1733400409765Disabling writes for close at 1733400409766 (+1 ms)Writing region close event to WAL at 1733400409766Closed at 1733400409766 2024-12-05T12:06:49,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742325_1501 (size=65) 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing ac3d10a1d44d11ee31615fff4b4a5947, disabling compactions & flushes 2024-12-05T12:06:49,766 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. after waiting 0 ms 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:49,766 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:49,766 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for ac3d10a1d44d11ee31615fff4b4a5947: Waiting for close lock at 1733400409766Disabling compacts and flushes for region at 1733400409766Disabling writes for close at 1733400409766Writing region close event to WAL at 1733400409766Closed at 1733400409766 2024-12-05T12:06:49,767 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:06:49,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400409767"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400409767"}]},"ts":"1733400409767"} 2024-12-05T12:06:49,768 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733400409767"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400409767"}]},"ts":"1733400409767"} 2024-12-05T12:06:49,769 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
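Illustration (not part of the captured log): once "Added 2 regions to meta" is logged, the new regions are discoverable through the client API. A hedged check, assuming an already-open Connection named conn and the usual imports (java.util.List, org.apache.hadoop.hbase.TableName, org.apache.hadoop.hbase.client.Admin, org.apache.hadoop.hbase.client.RegionInfo, org.apache.hadoop.hbase.util.Bytes), could look like this:

    try (Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions =
          admin.getRegions(TableName.valueOf("testtb-testExportWithChecksum"));
      // Two Put operations against hbase:meta were logged above, so two regions are expected here.
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName() + " ["
            + Bytes.toString(ri.getStartKey()) + "," + Bytes.toString(ri.getEndKey()) + ")");
      }
    }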
2024-12-05T12:06:49,770 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:06:49,770 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400409770"}]},"ts":"1733400409770"} 2024-12-05T12:06:49,772 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-05T12:06:49,772 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:06:49,773 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:06:49,773 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:06:49,773 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:06:49,773 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:06:49,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, ASSIGN}] 2024-12-05T12:06:49,774 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, ASSIGN 2024-12-05T12:06:49,774 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, ASSIGN 2024-12-05T12:06:49,775 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:06:49,775 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:06:49,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T12:06:49,926 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:06:49,927 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:49,927 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=5c1512f0bac1eb033bb472e2b4a64235, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:49,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, ASSIGN because future has completed 2024-12-05T12:06:49,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:06:49,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, ASSIGN because future has completed 2024-12-05T12:06:49,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:06:50,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T12:06:50,090 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,091 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c1512f0bac1eb033bb472e2b4a64235, NAME => 'testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:06:50,091 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
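Illustration (not part of the captured log): the assignment and region-open events that follow are internal to the master and region servers; a caller normally just blocks until the table reports available. A minimal polling sketch, assuming an open org.apache.hadoop.hbase.client.Admin handle named admin, java.io.IOException imported, and an arbitrary 60-second timeout:

    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    long deadline = System.currentTimeMillis() + 60_000L;
    while (!admin.isTableAvailable(tn)) {        // true once every region is assigned and open
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("table did not become available within 60s");
      }
      Thread.sleep(100);
    }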
2024-12-05T12:06:50,092 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => ac3d10a1d44d11ee31615fff4b4a5947, NAME => 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:06:50,092 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. service=AccessControlService 2024-12-05T12:06:50,092 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. service=AccessControlService 2024-12-05T12:06:50,092 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:50,092 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,093 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,095 INFO 
[StoreOpener-5c1512f0bac1eb033bb472e2b4a64235-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,095 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,096 INFO [StoreOpener-5c1512f0bac1eb033bb472e2b4a64235-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c1512f0bac1eb033bb472e2b4a64235 columnFamilyName cf 2024-12-05T12:06:50,096 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac3d10a1d44d11ee31615fff4b4a5947 columnFamilyName cf 2024-12-05T12:06:50,097 DEBUG [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:50,097 DEBUG [StoreOpener-5c1512f0bac1eb033bb472e2b4a64235-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:50,097 INFO [StoreOpener-5c1512f0bac1eb033bb472e2b4a64235-1 {}] regionserver.HStore(327): Store=5c1512f0bac1eb033bb472e2b4a64235/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:50,097 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] regionserver.HStore(327): Store=ac3d10a1d44d11ee31615fff4b4a5947/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:06:50,097 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 5c1512f0bac1eb033bb472e2b4a64235 
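Illustration (not part of the captured log): the CompactionConfiguration line above reports stock defaults. Expressed as the standard HBase configuration keys a deployment would tune, those values correspond roughly to the sketch below; setting them explicitly is shown only for orientation, not because this run overrides them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionDefaultsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
      }
    }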
2024-12-05T12:06:50,097 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,098 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,100 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,100 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,101 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:50,101 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:06:50,101 INFO 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 5c1512f0bac1eb033bb472e2b4a64235; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72138947, jitterRate=0.0749540776014328}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:50,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,102 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened ac3d10a1d44d11ee31615fff4b4a5947; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70452377, jitterRate=0.04982222616672516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:06:50,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 5c1512f0bac1eb033bb472e2b4a64235: Running coprocessor pre-open hook at 1733400410093Writing region info on filesystem at 1733400410093Initializing all the Stores at 1733400410094 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400410094Cleaning up temporary data from old regions at 1733400410098 (+4 ms)Running coprocessor post-open hooks at 1733400410102 (+4 ms)Region opened successfully at 1733400410102 2024-12-05T12:06:50,102 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for ac3d10a1d44d11ee31615fff4b4a5947: Running coprocessor pre-open hook at 1733400410093Writing region info on filesystem at 1733400410093Initializing all the Stores at 1733400410094 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400410094Cleaning up temporary data from old regions at 1733400410098 (+4 ms)Running coprocessor post-open hooks at 1733400410102 (+4 ms)Region opened successfully at 1733400410102 2024-12-05T12:06:50,103 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235., pid=216, masterSystemTime=1733400410085 2024-12-05T12:06:50,103 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947., pid=217, masterSystemTime=1733400410086 2024-12-05T12:06:50,104 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:50,104 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:50,105 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:06:50,105 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,105 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,105 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=5c1512f0bac1eb033bb472e2b4a64235, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:06:50,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:06:50,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:06:50,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-05T12:06:50,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012 in 174 msec 2024-12-05T12:06:50,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-05T12:06:50,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845 in 176 msec 2024-12-05T12:06:50,110 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, ASSIGN in 336 msec 2024-12-05T12:06:50,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-05T12:06:50,111 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, ASSIGN in 337 msec 2024-12-05T12:06:50,112 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:06:50,112 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400410112"}]},"ts":"1733400410112"} 2024-12-05T12:06:50,113 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-05T12:06:50,114 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:06:50,114 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-05T12:06:50,117 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T12:06:50,160 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:50,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:50,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:50,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 
\x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,172 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-05T12:06:50,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 419 msec 2024-12-05T12:06:50,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-05T12:06:50,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-05T12:06:50,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-05T12:06:50,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-05T12:06:50,379 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T12:06:50,380 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-05T12:06:50,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
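Illustration (not part of the captured log): the PermissionStorage and ZKPermissionWatcher entries above record the table owner receiving RWXCA on the new table, written automatically by the AccessController hook during table creation. An explicit grant that produces an equivalent ACL entry would look roughly like the sketch below; the connection setup is an assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void main(String[] args) throws Throwable {   // AccessControlClient.grant declares Throwable
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // R, W, X, C, A == READ, WRITE, EXEC, CREATE, ADMIN ("jenkins: RWXCA" above);
          // null family/qualifier means the grant applies to the whole table.
          AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportWithChecksum"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }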
2024-12-05T12:06:50,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:50,388 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,393 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,399 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T12:06:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400410402 (current time:1733400410402). 2024-12-05T12:06:50,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:50,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-05T12:06:50,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:50,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6435a711, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:50,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:50,405 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:50,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:50,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:50,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ee35c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
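Illustration (not part of the captured log): the "snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum ... type=FLUSH }" entry above corresponds to a plain Admin.snapshot call on the client side. A hedged sketch, with connection setup assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot, matching the request logged above.
          admin.snapshot("emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }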
2024-12-05T12:06:50,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:50,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:50,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,407 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:50,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65e90c17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:50,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:50,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:06:50,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:50,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,410 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:50,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12032a22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:50,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:50,411 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d9dbbb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:50,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,413 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:50,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b461ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:50,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:50,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:50,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,417 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947. 
2024-12-05T12:06:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,418 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T12:06:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T12:06:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T12:06:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-05T12:06:50,421 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:50,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T12:06:50,422 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:50,425 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:50,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742326_1502 (size=161) 2024-12-05T12:06:50,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742326_1502 (size=161) 2024-12-05T12:06:50,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742326_1502 (size=161) 2024-12-05T12:06:50,433 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:50,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235}] 2024-12-05T12:06:50,434 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,434 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,528 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T12:06:50,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-05T12:06:50,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for ac3d10a1d44d11ee31615fff4b4a5947: 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 5c1512f0bac1eb033bb472e2b4a64235: 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. for emptySnaptb0-testExportWithChecksum completed. 2024-12-05T12:06:50,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. for emptySnaptb0-testExportWithChecksum completed. 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:50,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:06:50,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742327_1503 (size=68) 2024-12-05T12:06:50,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742327_1503 (size=68) 2024-12-05T12:06:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742328_1504 (size=68) 2024-12-05T12:06:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742327_1503 (size=68) 2024-12-05T12:06:50,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742328_1504 (size=68) 2024-12-05T12:06:50,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-05T12:06:50,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742328_1504 (size=68) 2024-12-05T12:06:50,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:06:50,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-05T12:06:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-05T12:06:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-05T12:06:50,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,599 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,599 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,605 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 in 171 msec 2024-12-05T12:06:50,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-05T12:06:50,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 in 171 msec 2024-12-05T12:06:50,606 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:50,607 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:50,608 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:06:50,608 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:50,608 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:50,608 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:06:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742329_1505 (size=60) 2024-12-05T12:06:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742329_1505 (size=60) 2024-12-05T12:06:50,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742329_1505 (size=60) 2024-12-05T12:06:50,614 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:50,614 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-05T12:06:50,615 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-05T12:06:50,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742330_1506 (size=641) 2024-12-05T12:06:50,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742330_1506 (size=641) 2024-12-05T12:06:50,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742330_1506 (size=641) 2024-12-05T12:06:50,625 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:50,630 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:50,630 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-05T12:06:50,631 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:50,632 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-05T12:06:50,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 213 msec 2024-12-05T12:06:50,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-05T12:06:50,739 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T12:06:50,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:50,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:06:50,752 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,754 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-05T12:06:50,754 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:06:50,754 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:06:50,755 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,760 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,765 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-05T12:06:50,767 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400410768 (current time:1733400410768). 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60923bd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:50,769 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:50,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:50,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:50,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d7b57dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-05T12:06:50,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:50,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:50,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,770 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:50,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f278df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:50,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:50,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,773 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,774 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,774 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b63fdb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:06:50,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:06:50,775 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:06:50,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:06:50,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:06:50,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@277d0da8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:06:50,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:06:50,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,776 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:06:50,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7940d2e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:06:50,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:06:50,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:06:50,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,778 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58244, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:06:50,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:06:50,780 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57702, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:06:50,781 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:06:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:06:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:06:50,781 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:06:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-05T12:06:50,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-05T12:06:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-05T12:06:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-05T12:06:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T12:06:50,784 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:06:50,785 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:06:50,787 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:06:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742331_1507 (size=156) 2024-12-05T12:06:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742331_1507 (size=156) 2024-12-05T12:06:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742331_1507 (size=156) 2024-12-05T12:06:50,794 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:06:50,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235}] 2024-12-05T12:06:50,795 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,795 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T12:06:50,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-05T12:06:50,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-05T12:06:50,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:50,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:06:50,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 5c1512f0bac1eb033bb472e2b4a64235 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-05T12:06:50,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing ac3d10a1d44d11ee31615fff4b4a5947 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-05T12:06:50,970 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 is 71, key is 06aeee6ef705c349f08a50519b09a2dd/cf:q/1733400410749/Put/seqid=0 2024-12-05T12:06:50,970 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 is 71, key is 10dd58f106be8ef14cd1142b6adcdbe7/cf:q/1733400410751/Put/seqid=0 2024-12-05T12:06:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742332_1508 (size=5102) 2024-12-05T12:06:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742332_1508 (size=5102) 2024-12-05T12:06:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742332_1508 (size=5102) 2024-12-05T12:06:50,977 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:50,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742333_1509 (size=8171) 2024-12-05T12:06:50,977 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742333_1509 (size=8171) 2024-12-05T12:06:50,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742333_1509 (size=8171) 2024-12-05T12:06:50,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:50,981 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:50,981 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:50,982 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/.tmp/cf/629570661b704ad0917762179199ef0b, store: [table=testtb-testExportWithChecksum family=cf region=5c1512f0bac1eb033bb472e2b4a64235] 2024-12-05T12:06:50,982 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/.tmp/cf/fa5423db36394c1ebcdc47a3d35a595f, store: [table=testtb-testExportWithChecksum family=cf region=ac3d10a1d44d11ee31615fff4b4a5947] 2024-12-05T12:06:50,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/.tmp/cf/629570661b704ad0917762179199ef0b is 206, key is 15e4c6acb9452d0b2846d5f9df1e40a67/cf:q/1733400410751/Put/seqid=0 2024-12-05T12:06:50,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/.tmp/cf/fa5423db36394c1ebcdc47a3d35a595f is 206, key is 0b73b4f7f8ff240171ac31286a28a2829/cf:q/1733400410749/Put/seqid=0 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742335_1511 (size=5906) 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742334_1510 (size=14853) 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742334_1510 (size=14853) 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742335_1511 (size=5906) 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742334_1510 (size=14853) 2024-12-05T12:06:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742335_1511 (size=5906) 2024-12-05T12:06:50,993 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/.tmp/cf/fa5423db36394c1ebcdc47a3d35a595f 2024-12-05T12:06:50,993 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/.tmp/cf/629570661b704ad0917762179199ef0b 2024-12-05T12:06:50,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/.tmp/cf/629570661b704ad0917762179199ef0b as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b 2024-12-05T12:06:50,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/.tmp/cf/fa5423db36394c1ebcdc47a3d35a595f as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f 2024-12-05T12:06:51,000 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f, entries=3, sequenceid=6, filesize=5.8 K 2024-12-05T12:06:51,000 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b, entries=47, sequenceid=6, filesize=14.5 K 2024-12-05T12:06:51,001 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ac3d10a1d44d11ee31615fff4b4a5947 in 52ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:51,001 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5c1512f0bac1eb033bb472e2b4a64235 in 52ms, sequenceid=6, compaction requested=false 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for ac3d10a1d44d11ee31615fff4b4a5947: 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 5c1512f0bac1eb033bb472e2b4a64235: 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. for snaptb0-testExportWithChecksum completed. 2024-12-05T12:06:51,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. for snaptb0-testExportWithChecksum completed. 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f] hfiles 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b] hfiles 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f for snapshot=snaptb0-testExportWithChecksum 2024-12-05T12:06:51,002 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b for snapshot=snaptb0-testExportWithChecksum 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742337_1513 (size=107) 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742336_1512 (size=107) 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742336_1512 (size=107) 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742337_1513 (size=107) 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742336_1512 (size=107) 2024-12-05T12:06:51,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742337_1513 (size=107) 2024-12-05T12:06:51,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:06:51,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-05T12:06:51,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:06:51,008 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-05T12:06:51,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-05T12:06:51,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-05T12:06:51,008 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:51,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:51,008 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:51,008 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:51,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235 in 215 msec 2024-12-05T12:06:51,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-05T12:06:51,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947 in 215 msec 2024-12-05T12:06:51,011 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:06:51,012 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:06:51,013 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
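The SnapshotRegionProcedure and SnapshotManifest work traced above (region flush, region-info, hfile references, then the MOB region) is what a client-side snapshot request kicks off on the master and region servers. A minimal sketch, assuming a reachable cluster and the standard hbase-client Admin API; the snapshot and table names are taken from the log, the class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Flush-type snapshot: each region is flushed, then region-info and
          // hfile references are recorded in the snapshot manifest (the
          // SnapshotRegionProcedure / SnapshotManifest lines in the log).
          admin.snapshot("snaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }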
2024-12-05T12:06:51,013 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:06:51,013 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:06:51,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947] hfiles 2024-12-05T12:06:51,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:06:51,014 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:06:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742338_1514 (size=291) 2024-12-05T12:06:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742338_1514 (size=291) 2024-12-05T12:06:51,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742338_1514 (size=291) 2024-12-05T12:06:51,021 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:06:51,021 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-05T12:06:51,021 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T12:06:51,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742339_1515 (size=951) 2024-12-05T12:06:51,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742339_1515 (size=951) 2024-12-05T12:06:51,029 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742339_1515 (size=951) 2024-12-05T12:06:51,036 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:06:51,041 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:06:51,041 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T12:06:51,042 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:06:51,042 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-05T12:06:51,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 260 msec 2024-12-05T12:06:51,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-05T12:06:51,098 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T12:06:51,099 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098 2024-12-05T12:06:51,099 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:06:51,125 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 
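The export setup above copies snapshot snaptb0-testExportWithChecksum from srcFsUri=hdfs://localhost:37541 to a local-filesystem target. A hedged sketch of an equivalent ExportSnapshot run driven through ToolRunner; -snapshot and -copy-to are the documented tool options, while the file:///tmp/local-export destination and class name are placeholders, not the test's real values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Command-line equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //       -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"  // placeholder local target
        });
        System.exit(rc);
      }
    }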
2024-12-05T12:06:51,125 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@684dc976, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T12:06:51,127 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:06:51,130 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T12:06:51,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:51,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:51,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-10327198140120070544.jar 2024-12-05T12:06:52,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,102 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-17261180271383996304.jar 2024-12-05T12:06:52,103 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,103 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,103 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:06:52,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:06:52,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:06:52,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:06:52,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:06:52,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:06:52,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:06:52,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:06:52,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:06:52,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:52,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:52,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:06:52,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:52,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:06:52,176 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742340_1516 (size=131440) 2024-12-05T12:06:52,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742340_1516 (size=131440) 2024-12-05T12:06:52,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742340_1516 (size=131440) 2024-12-05T12:06:52,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742341_1517 (size=4188619) 2024-12-05T12:06:52,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742341_1517 (size=4188619) 2024-12-05T12:06:52,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742341_1517 (size=4188619) 2024-12-05T12:06:52,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742342_1518 (size=1323991) 2024-12-05T12:06:52,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742342_1518 (size=1323991) 2024-12-05T12:06:52,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742342_1518 (size=1323991) 2024-12-05T12:06:52,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742343_1519 (size=903934) 2024-12-05T12:06:52,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742343_1519 (size=903934) 2024-12-05T12:06:52,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742343_1519 (size=903934) 2024-12-05T12:06:52,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742344_1520 (size=8360360) 2024-12-05T12:06:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742344_1520 (size=8360360) 2024-12-05T12:06:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742344_1520 (size=8360360) 2024-12-05T12:06:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742345_1521 (size=6425023) 2024-12-05T12:06:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742345_1521 (size=6425023) 2024-12-05T12:06:52,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742345_1521 (size=6425023) 2024-12-05T12:06:52,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742346_1522 (size=1877034) 2024-12-05T12:06:52,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742346_1522 (size=1877034) 2024-12-05T12:06:52,331 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742346_1522 (size=1877034) 2024-12-05T12:06:52,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742347_1523 (size=77835) 2024-12-05T12:06:52,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742347_1523 (size=77835) 2024-12-05T12:06:52,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742347_1523 (size=77835) 2024-12-05T12:06:52,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742348_1524 (size=30949) 2024-12-05T12:06:52,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742348_1524 (size=30949) 2024-12-05T12:06:52,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742348_1524 (size=30949) 2024-12-05T12:06:52,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742349_1525 (size=1597213) 2024-12-05T12:06:52,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742349_1525 (size=1597213) 2024-12-05T12:06:52,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742349_1525 (size=1597213) 2024-12-05T12:06:52,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742350_1526 (size=4695811) 2024-12-05T12:06:52,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742350_1526 (size=4695811) 2024-12-05T12:06:52,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742350_1526 (size=4695811) 2024-12-05T12:06:53,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742351_1527 (size=443172) 2024-12-05T12:06:53,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742351_1527 (size=443172) 2024-12-05T12:06:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742351_1527 (size=443172) 2024-12-05T12:06:53,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742352_1528 (size=232957) 2024-12-05T12:06:53,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742352_1528 (size=232957) 2024-12-05T12:06:53,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742352_1528 (size=232957) 2024-12-05T12:06:53,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742353_1529 (size=127628) 2024-12-05T12:06:53,097 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742353_1529 (size=127628) 2024-12-05T12:06:53,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742353_1529 (size=127628) 2024-12-05T12:06:53,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742354_1530 (size=20406) 2024-12-05T12:06:53,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742354_1530 (size=20406) 2024-12-05T12:06:53,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742354_1530 (size=20406) 2024-12-05T12:06:53,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742355_1531 (size=5175431) 2024-12-05T12:06:53,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742355_1531 (size=5175431) 2024-12-05T12:06:53,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742355_1531 (size=5175431) 2024-12-05T12:06:53,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742356_1532 (size=217634) 2024-12-05T12:06:53,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742356_1532 (size=217634) 2024-12-05T12:06:53,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742356_1532 (size=217634) 2024-12-05T12:06:53,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742357_1533 (size=1832290) 2024-12-05T12:06:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742357_1533 (size=1832290) 2024-12-05T12:06:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742357_1533 (size=1832290) 2024-12-05T12:06:53,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742358_1534 (size=322274) 2024-12-05T12:06:53,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742358_1534 (size=322274) 2024-12-05T12:06:53,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742358_1534 (size=322274) 2024-12-05T12:06:53,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742359_1535 (size=503880) 2024-12-05T12:06:53,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742359_1535 (size=503880) 2024-12-05T12:06:53,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742359_1535 (size=503880) 
2024-12-05T12:06:53,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742360_1536 (size=29229) 2024-12-05T12:06:53,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742360_1536 (size=29229) 2024-12-05T12:06:53,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742360_1536 (size=29229) 2024-12-05T12:06:53,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742361_1537 (size=24096) 2024-12-05T12:06:53,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742361_1537 (size=24096) 2024-12-05T12:06:53,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742361_1537 (size=24096) 2024-12-05T12:06:53,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742362_1538 (size=111872) 2024-12-05T12:06:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742362_1538 (size=111872) 2024-12-05T12:06:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742362_1538 (size=111872) 2024-12-05T12:06:53,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742363_1539 (size=45609) 2024-12-05T12:06:53,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742363_1539 (size=45609) 2024-12-05T12:06:53,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742363_1539 (size=45609) 2024-12-05T12:06:53,735 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0008_000001 (auth:SIMPLE) from 127.0.0.1:51216 2024-12-05T12:06:53,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742364_1540 (size=136454) 2024-12-05T12:06:53,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742364_1540 (size=136454) 2024-12-05T12:06:53,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742364_1540 (size=136454) 2024-12-05T12:06:53,911 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
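The JobResourceUploader warning above ("No job jar file set. ... See Job or Job#setJar(String).") and the long run of "For class ..., using jar ..." lines both come from MapReduce job setup. A minimal sketch, assuming standard Hadoop and HBase client APIs, of how a job normally names its own jar and ships the dependency jars that TableMapReduceUtil resolves; the class and job names are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JobSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch");
        // Point the job at the jar holding the user classes; without this the
        // JobResourceUploader logs "No job jar file set. User classes may not be found."
        job.setJarByClass(JobSetupSketch.class);
        // Resolve and ship the HBase/ZooKeeper/metrics/etc. dependency jars with the
        // job -- the source of the "For class ..., using jar ..." DEBUG lines above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }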
2024-12-05T12:06:53,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-05T12:06:53,917 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-05T12:06:53,917 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-05T12:06:53,917 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-05T12:06:53,918 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-05T12:06:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742365_1541 (size=1023) 2024-12-05T12:06:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742365_1541 (size=1023) 2024-12-05T12:06:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742365_1541 (size=1023) 2024-12-05T12:06:54,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742366_1542 (size=35) 2024-12-05T12:06:54,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742366_1542 (size=35) 2024-12-05T12:06:54,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742366_1542 (size=35) 2024-12-05T12:06:54,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742367_1543 (size=304044) 2024-12-05T12:06:54,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742367_1543 (size=304044) 2024-12-05T12:06:54,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742367_1543 (size=304044) 2024-12-05T12:06:54,116 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:06:54,116 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:06:54,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48914 2024-12-05T12:06:55,036 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:06:58,847 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0008/container_1733400137807_0008_01_000001/launch_container.sh] 2024-12-05T12:06:58,847 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0008/container_1733400137807_0008_01_000001/container_tokens] 2024-12-05T12:06:58,847 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0008/container_1733400137807_0008_01_000001/sysfs] 2024-12-05T12:07:00,732 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:32868 2024-12-05T12:07:00,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742368_1544 (size=349742) 2024-12-05T12:07:00,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742368_1544 (size=349742) 2024-12-05T12:07:00,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742368_1544 (size=349742) 2024-12-05T12:07:02,961 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48084 2024-12-05T12:07:02,962 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:55620 2024-12-05T12:07:03,817 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:55626 2024-12-05T12:07:03,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48094 2024-12-05T12:07:06,729 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0009_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:07:08,983 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, 
See HBASE-27595 for details. Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:09,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:40416 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:10,938 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-05T12:07:10,982 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-05T12:07:11,037 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-05T12:07:11,153 DEBUG [master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-05T12:07:11,153 DEBUG [master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-05T12:07:11,865 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48974 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:12,354 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e2d55a422a8d6cd55d6d7cbfae79e377, had cached 0 bytes from a total of 14661 2024-12-05T12:07:12,354 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d819fc7dca4529788a4ca3a58e3fe9e5, had cached 0 bytes from a total of 5888 2024-12-05T12:07:12,535 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000005/launch_container.sh] 2024-12-05T12:07:12,535 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000005/container_tokens] 2024-12-05T12:07:12,535 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000005/sysfs] 2024-12-05T12:07:12,625 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000003/launch_container.sh] 2024-12-05T12:07:12,625 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000003/container_tokens] 2024-12-05T12:07:12,625 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000003/sysfs] 2024-12-05T12:07:12,831 INFO [regionserver/80666a11a09f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-05T12:07:12,831 INFO [regionserver/80666a11a09f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-05T12:07:12,831 INFO [regionserver/80666a11a09f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:13,799 INFO [regionserver/80666a11a09f:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 145255 ms 2024-12-05T12:07:13,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:40422 2024-12-05T12:07:13,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48984 2024-12-05T12:07:13,948 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000002/launch_container.sh] 2024-12-05T12:07:13,948 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000002/container_tokens] 2024-12-05T12:07:13,948 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000002/sysfs] 2024-12-05T12:07:13,953 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-05T12:07:13,953 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-05T12:07:13,953 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d819fc7dca4529788a4ca3a58e3fe9e5 changed from -1.0 to 0.0, 
refreshing cache 2024-12-05T12:07:13,953 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ac3d10a1d44d11ee31615fff4b4a5947 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:07:13,953 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e2d55a422a8d6cd55d6d7cbfae79e377 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:07:13,953 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5c1512f0bac1eb033bb472e2b4a64235 changed from -1.0 to 0.0, refreshing cache 2024-12-05T12:07:13,957 DEBUG [master/80666a11a09f:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-05T12:07:13,957 INFO [master/80666a11a09f:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-05T12:07:13,957 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-05T12:07:13,960 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:07:13,960 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-12-05T12:07:13,960 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 3 regions 2024-12-05T12:07:13,960 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-05T12:07:13,960 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:07:13,961 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:07:13,961 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:07:13,961 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:07:13,961 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:07:13,961 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:07:13,961 DEBUG [master/80666a11a09f:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-05T12:07:13,964 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.StochasticLoadBalancer(403): Cluster wide - Calculating plan. may take up to 30000ms to complete. 
2024-12-05T12:07:13,965 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2543267517145956, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.821074384418005, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); computedMaxSteps=14400 2024-12-05T12:07:14,415 INFO [master/80666a11a09f:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 453 ms to try 14400 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2543267517145956 to a new imbalance of 0.015714614486578425. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.821074384418005, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-05T12:07:14,419 INFO [master/80666a11a09f:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 6 2024-12-05T12:07:14,419 INFO [master/80666a11a09f:0.Chore.1 {}] master.HMaster(2172): balance hri=ac3d10a1d44d11ee31615fff4b4a5947, source=80666a11a09f,42305,1733400131012, destination=80666a11a09f,37629,1733400130958 2024-12-05T12:07:14,425 DEBUG [master/80666a11a09f:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE 2024-12-05T12:07:14,425 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE 2024-12-05T12:07:14,427 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:07:14,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE because future has completed 2024-12-05T12:07:14,432 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:07:14,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:07:14,585 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(122): Close ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,585 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:07:14,585 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1722): Closing ac3d10a1d44d11ee31615fff4b4a5947, disabling compactions & flushes 2024-12-05T12:07:14,585 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:14,585 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:14,586 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. after waiting 0 ms 2024-12-05T12:07:14,586 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:14,624 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:07:14,624 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:07:14,625 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:07:14,625 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegion(1676): Region close journal for ac3d10a1d44d11ee31615fff4b4a5947: Waiting for close lock at 1733400434585Running coprocessor pre-close hooks at 1733400434585Disabling compacts and flushes for region at 1733400434585Disabling writes for close at 1733400434586 (+1 ms)Writing region close event to WAL at 1733400434614 (+28 ms)Running coprocessor post-close hooks at 1733400434624 (+10 ms)Closed at 1733400434624 2024-12-05T12:07:14,625 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] regionserver.HRegionServer(3302): Adding ac3d10a1d44d11ee31615fff4b4a5947 move to 80666a11a09f,37629,1733400130958 record at close sequenceid=6 2024-12-05T12:07:14,628 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=225}] handler.UnassignRegionHandler(157): Closed ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,629 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=CLOSED 2024-12-05T12:07:14,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:07:14,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-05T12:07:14,636 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,42305,1733400131012 in 200 msec 2024-12-05T12:07:14,637 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE; state=CLOSED, location=80666a11a09f,37629,1733400130958; forceNewPlan=false, retain=false 2024-12-05T12:07:14,788 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-05T12:07:14,788 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=OPENING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:07:14,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE because future has completed 2024-12-05T12:07:14,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:07:14,949 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47089, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-05T12:07:14,953 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:14,954 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => ac3d10a1d44d11ee31615fff4b4a5947, NAME => 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:07:14,954 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. service=AccessControlService 2024-12-05T12:07:14,954 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-05T12:07:14,955 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,955 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:07:14,955 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7794): checking encryption for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,955 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(7797): checking classloading for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,970 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,971 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac3d10a1d44d11ee31615fff4b4a5947 columnFamilyName cf 2024-12-05T12:07:14,972 DEBUG [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:14,993 DEBUG [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f 2024-12-05T12:07:14,994 INFO [StoreOpener-ac3d10a1d44d11ee31615fff4b4a5947-1 {}] regionserver.HStore(327): Store=ac3d10a1d44d11ee31615fff4b4a5947/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:07:14,994 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1038): replaying wal for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:14,995 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1048): stopping wal replay for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,003 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,006 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1093): writing seq id for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,007 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1114): Opened ac3d10a1d44d11ee31615fff4b4a5947; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75094567, jitterRate=0.11899624764919281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:07:15,007 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:15,007 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegion(1006): Region open journal for ac3d10a1d44d11ee31615fff4b4a5947: Running coprocessor pre-open hook at 1733400434955Writing region info on filesystem at 1733400434955Initializing all the Stores at 1733400434956 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400434956Cleaning up temporary data from old regions at 1733400435003 (+47 ms)Running coprocessor post-open hooks at 1733400435007 (+4 ms)Region opened successfully at 1733400435007 2024-12-05T12:07:15,008 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947., pid=226, masterSystemTime=1733400434948 2024-12-05T12:07:15,010 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:15,010 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=226}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:07:15,011 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=OPEN, openSeqNum=10, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:07:15,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:07:15,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-05T12:07:15,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958 in 223 msec 2024-12-05T12:07:15,019 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, REOPEN/MOVE in 598 msec 2024-12-05T12:07:15,026 DEBUG [master/80666a11a09f:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-05T12:07:15,035 DEBUG [master/80666a11a09f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-05T12:07:15,521 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000004/launch_container.sh] 2024-12-05T12:07:15,522 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000004/container_tokens] 2024-12-05T12:07:15,522 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:18,006 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000008/launch_container.sh] 2024-12-05T12:07:18,006 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000008/container_tokens] 2024-12-05T12:07:18,006 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000008/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:18,874 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:39182 2024-12-05T12:07:19,016 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:07:19,494 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000010/launch_container.sh] 2024-12-05T12:07:19,494 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000010/container_tokens] 2024-12-05T12:07:19,494 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000010/sysfs] 2024-12-05T12:07:19,610 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000009/launch_container.sh] 2024-12-05T12:07:19,610 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000009/container_tokens] 2024-12-05T12:07:19,610 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000009/sysfs] 2024-12-05T12:07:19,880 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48076 2024-12-05T12:07:19,882 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48080 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:20,798 INFO [regionserver/80666a11a09f:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. because 02119c1ced80736cbd0ab02ca19ee30c/l has an old edit so flush to free WALs after random delay 17672 ms 2024-12-05T12:07:20,883 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48096 2024-12-05T12:07:22,264 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000007/launch_container.sh] 2024-12-05T12:07:22,264 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000007/container_tokens] 2024-12-05T12:07:22,264 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000007/sysfs] 2024-12-05T12:07:22,591 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0009_01_000015 while processing FINISH_CONTAINERS event 2024-12-05T12:07:23,479 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0009_01_000016 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch 
between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:24,785 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000013/launch_container.sh] 2024-12-05T12:07:24,785 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000013/container_tokens] 2024-12-05T12:07:24,786 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000013/sysfs] 2024-12-05T12:07:24,893 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000012/launch_container.sh] 2024-12-05T12:07:24,893 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000012/container_tokens] 2024-12-05T12:07:24,894 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000012/sysfs] 2024-12-05T12:07:24,895 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48108 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:25,904 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:39186 2024-12-05T12:07:25,910 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48120 2024-12-05T12:07:26,361 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000014/launch_container.sh] 2024-12-05T12:07:26,361 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000014/container_tokens] 2024-12-05T12:07:26,362 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/local-export-1733400411098/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-05T12:07:26,907 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:48128 2024-12-05T12:07:28,770 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0009_01_000021 while processing FINISH_CONTAINERS event 2024-12-05T12:07:28,908 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000011/launch_container.sh] 2024-12-05T12:07:28,908 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000011/container_tokens] 2024-12-05T12:07:28,908 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000011/sysfs] 2024-12-05T12:07:29,323 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0009_01_000022 while processing FINISH_CONTAINERS event 2024-12-05T12:07:29,866 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:42862 2024-12-05T12:07:29,866 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:56644 2024-12-05T12:07:29,866 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 
(auth:SIMPLE) from 127.0.0.1:42852 2024-12-05T12:07:29,941 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733400137807_0009_01_000018 is : 143 2024-12-05T12:07:29,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742369_1545 (size=48779) 2024-12-05T12:07:29,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742369_1545 (size=48779) 2024-12-05T12:07:29,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742369_1545 (size=48779) 2024-12-05T12:07:29,993 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733400137807_0009_01_000019 is : 143 2024-12-05T12:07:30,010 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733400137807_0009_01_000020 is : 143 2024-12-05T12:07:30,026 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000019/launch_container.sh] 2024-12-05T12:07:30,026 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000019/container_tokens] 2024-12-05T12:07:30,027 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000019/sysfs] 2024-12-05T12:07:30,029 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000018/launch_container.sh] 2024-12-05T12:07:30,029 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000018/container_tokens] 2024-12-05T12:07:30,029 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000018/sysfs] 2024-12-05T12:07:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742370_1546 (size=460) 2024-12-05T12:07:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742370_1546 (size=460) 2024-12-05T12:07:30,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742370_1546 (size=460) 2024-12-05T12:07:30,042 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000020/launch_container.sh] 2024-12-05T12:07:30,042 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000020/container_tokens] 2024-12-05T12:07:30,042 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000020/sysfs] 2024-12-05T12:07:30,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742371_1547 (size=48779) 2024-12-05T12:07:30,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742371_1547 (size=48779) 2024-12-05T12:07:30,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742371_1547 (size=48779) 2024-12-05T12:07:30,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742372_1548 (size=349742) 2024-12-05T12:07:30,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742372_1548 (size=349742) 2024-12-05T12:07:30,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742372_1548 (size=349742) 2024-12-05T12:07:30,094 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:42876 2024-12-05T12:07:31,449 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733400137807_0009_m_000000 Job failed as tasks failed. 
failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
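The failed export above runs through the standard ExportSnapshot tool, along the call path visible in the stack trace (ToolRunner -> AbstractHBaseTool.run -> ExportSnapshot.doWork -> runCopyJob). The following is a minimal, hypothetical Java sketch of that kind of invocation, not the test's own code; the class name ExportSnapshotSketch and the -copy-to URI are placeholders and do not come from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml / core-site.xml are on the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Drives ExportSnapshot the same way the stack trace shows: ToolRunner hands the
        // arguments to the tool, which copies the named snapshot to the target filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",          // snapshot named in the log above
            "-copy-to", "hdfs://namenode.example:8020/hbase-export" // placeholder destination
        });
        System.exit(rc); // non-zero when the export (e.g. its copy job) fails
      }
    }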
2024-12-05T12:07:31,450 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450 2024-12-05T12:07:31,450 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:31,472 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:31,472 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T12:07:31,474 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:07:31,479 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-05T12:07:31,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742373_1549 (size=951) 2024-12-05T12:07:31,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742373_1549 (size=951) 2024-12-05T12:07:31,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742373_1549 (size=951) 2024-12-05T12:07:31,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742374_1550 (size=156) 2024-12-05T12:07:31,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742374_1550 (size=156) 2024-12-05T12:07:31,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742374_1550 (size=156) 2024-12-05T12:07:31,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:31,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:31,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,296 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-14139863150153425954.jar 2024-12-05T12:07:32,297 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,297 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-14536951340238639466.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:07:32,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:07:32,351 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:32,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:32,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742375_1551 (size=131440) 2024-12-05T12:07:32,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742375_1551 (size=131440) 2024-12-05T12:07:32,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742375_1551 (size=131440) 2024-12-05T12:07:32,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742376_1552 (size=4188619) 2024-12-05T12:07:32,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742376_1552 (size=4188619) 2024-12-05T12:07:32,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742376_1552 (size=4188619) 2024-12-05T12:07:32,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742377_1553 (size=1323991) 2024-12-05T12:07:32,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742377_1553 (size=1323991) 2024-12-05T12:07:32,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742377_1553 (size=1323991) 2024-12-05T12:07:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39603 is added to blk_1073742378_1554 (size=903934) 2024-12-05T12:07:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742378_1554 (size=903934) 2024-12-05T12:07:32,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742378_1554 (size=903934) 2024-12-05T12:07:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742379_1555 (size=8360360) 2024-12-05T12:07:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742379_1555 (size=8360360) 2024-12-05T12:07:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742379_1555 (size=8360360) 2024-12-05T12:07:32,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742380_1556 (size=1877034) 2024-12-05T12:07:32,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742380_1556 (size=1877034) 2024-12-05T12:07:32,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742380_1556 (size=1877034) 2024-12-05T12:07:32,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742381_1557 (size=77835) 2024-12-05T12:07:32,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742381_1557 (size=77835) 2024-12-05T12:07:32,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742381_1557 (size=77835) 2024-12-05T12:07:32,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742382_1558 (size=30949) 2024-12-05T12:07:32,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742382_1558 (size=30949) 2024-12-05T12:07:32,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742382_1558 (size=30949) 2024-12-05T12:07:32,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742383_1559 (size=1597213) 2024-12-05T12:07:32,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742383_1559 (size=1597213) 2024-12-05T12:07:32,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742383_1559 (size=1597213) 2024-12-05T12:07:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742384_1560 (size=4695811) 2024-12-05T12:07:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742384_1560 (size=4695811) 2024-12-05T12:07:32,494 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742384_1560 (size=4695811) 2024-12-05T12:07:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742385_1561 (size=232957) 2024-12-05T12:07:32,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742385_1561 (size=232957) 2024-12-05T12:07:32,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742385_1561 (size=232957) 2024-12-05T12:07:32,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742386_1562 (size=127628) 2024-12-05T12:07:32,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742386_1562 (size=127628) 2024-12-05T12:07:32,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742386_1562 (size=127628) 2024-12-05T12:07:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742387_1563 (size=20406) 2024-12-05T12:07:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742387_1563 (size=20406) 2024-12-05T12:07:32,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742387_1563 (size=20406) 2024-12-05T12:07:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742388_1564 (size=6425023) 2024-12-05T12:07:32,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742388_1564 (size=6425023) 2024-12-05T12:07:32,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742388_1564 (size=6425023) 2024-12-05T12:07:32,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742389_1565 (size=5175431) 2024-12-05T12:07:32,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742389_1565 (size=5175431) 2024-12-05T12:07:32,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742389_1565 (size=5175431) 2024-12-05T12:07:32,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742390_1566 (size=217634) 2024-12-05T12:07:32,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742390_1566 (size=217634) 2024-12-05T12:07:32,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742390_1566 (size=217634) 2024-12-05T12:07:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742391_1567 (size=1832290) 2024-12-05T12:07:32,567 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742391_1567 (size=1832290) 2024-12-05T12:07:32,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742391_1567 (size=1832290) 2024-12-05T12:07:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742392_1568 (size=322274) 2024-12-05T12:07:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742392_1568 (size=322274) 2024-12-05T12:07:32,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742392_1568 (size=322274) 2024-12-05T12:07:32,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742393_1569 (size=503880) 2024-12-05T12:07:32,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742393_1569 (size=503880) 2024-12-05T12:07:32,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742393_1569 (size=503880) 2024-12-05T12:07:32,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742394_1570 (size=443172) 2024-12-05T12:07:32,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742394_1570 (size=443172) 2024-12-05T12:07:32,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742394_1570 (size=443172) 2024-12-05T12:07:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742395_1571 (size=29229) 2024-12-05T12:07:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742395_1571 (size=29229) 2024-12-05T12:07:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742395_1571 (size=29229) 2024-12-05T12:07:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742396_1572 (size=24096) 2024-12-05T12:07:32,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742396_1572 (size=24096) 2024-12-05T12:07:32,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742396_1572 (size=24096) 2024-12-05T12:07:32,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742397_1573 (size=111872) 2024-12-05T12:07:32,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742397_1573 (size=111872) 2024-12-05T12:07:32,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742397_1573 (size=111872) 2024-12-05T12:07:32,617 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742398_1574 (size=45609) 2024-12-05T12:07:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742398_1574 (size=45609) 2024-12-05T12:07:32,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742398_1574 (size=45609) 2024-12-05T12:07:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742399_1575 (size=136454) 2024-12-05T12:07:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742399_1575 (size=136454) 2024-12-05T12:07:32,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742399_1575 (size=136454) 2024-12-05T12:07:32,623 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T12:07:32,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-05T12:07:32,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-05T12:07:32,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-05T12:07:32,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-05T12:07:32,627 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-05T12:07:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742400_1576 (size=1023) 2024-12-05T12:07:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742400_1576 (size=1023) 2024-12-05T12:07:32,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742400_1576 (size=1023) 2024-12-05T12:07:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742401_1577 (size=35) 2024-12-05T12:07:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742401_1577 (size=35) 2024-12-05T12:07:32,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742401_1577 (size=35) 2024-12-05T12:07:32,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742402_1578 (size=303996) 2024-12-05T12:07:32,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742402_1578 (size=303996) 2024-12-05T12:07:32,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742402_1578 (size=303996) 2024-12-05T12:07:35,093 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5c1512f0bac1eb033bb472e2b4a64235, had cached 0 bytes from 
a total of 14853 2024-12-05T12:07:35,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000017/launch_container.sh] 2024-12-05T12:07:35,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000017/container_tokens] 2024-12-05T12:07:35,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000017/sysfs] 2024-12-05T12:07:36,174 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:07:36,174 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:07:36,178 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0009_000001 (auth:SIMPLE) from 127.0.0.1:56654 2024-12-05T12:07:36,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000001/launch_container.sh] 2024-12-05T12:07:36,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000001/container_tokens] 2024-12-05T12:07:36,206 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_1/usercache/jenkins/appcache/application_1733400137807_0009/container_1733400137807_0009_01_000001/sysfs] 2024-12-05T12:07:37,023 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:42882 2024-12-05T12:07:38,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 02119c1ced80736cbd0ab02ca19ee30c 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-12-05T12:07:38,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/c3f099503b834028ac5e5dc2d3913ad3 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733400385161/DeleteFamily/seqid=0 2024-12-05T12:07:38,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742403_1579 (size=5791) 2024-12-05T12:07:38,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742403_1579 (size=5791) 2024-12-05T12:07:38,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742403_1579 (size=5791) 2024-12-05T12:07:38,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/c3f099503b834028ac5e5dc2d3913ad3 2024-12-05T12:07:38,504 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3f099503b834028ac5e5dc2d3913ad3 2024-12-05T12:07:38,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/c3f099503b834028ac5e5dc2d3913ad3 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/l/c3f099503b834028ac5e5dc2d3913ad3 2024-12-05T12:07:38,510 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3f099503b834028ac5e5dc2d3913ad3 2024-12-05T12:07:38,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/l/c3f099503b834028ac5e5dc2d3913ad3, entries=13, sequenceid=28, filesize=5.7 K 2024-12-05T12:07:38,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 02119c1ced80736cbd0ab02ca19ee30c in 42ms, sequenceid=28, compaction requested=false 2024-12-05T12:07:38,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 02119c1ced80736cbd0ab02ca19ee30c: 2024-12-05T12:07:38,983 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:07:41,238 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:52502 2024-12-05T12:07:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742404_1580 (size=349694) 2024-12-05T12:07:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742404_1580 (size=349694) 2024-12-05T12:07:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742404_1580 (size=349694) 2024-12-05T12:07:43,432 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:51030 2024-12-05T12:07:43,432 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:34492 2024-12-05T12:07:44,328 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:51042 2024-12-05T12:07:44,331 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:34498 2024-12-05T12:07:47,177 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:07:48,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742405_1581 (size=14853) 2024-12-05T12:07:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742405_1581 (size=14853) 2024-12-05T12:07:48,608 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742405_1581 (size=14853) 2024-12-05T12:07:48,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000002/launch_container.sh] 2024-12-05T12:07:48,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000002/container_tokens] 2024-12-05T12:07:48,967 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_2/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000002/sysfs] 2024-12-05T12:07:49,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742407_1583 (size=8171) 2024-12-05T12:07:49,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742407_1583 (size=8171) 2024-12-05T12:07:49,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742407_1583 (size=8171) 2024-12-05T12:07:50,000 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000003/launch_container.sh] 2024-12-05T12:07:50,000 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000003/container_tokens] 2024-12-05T12:07:50,000 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000003/sysfs] 2024-12-05T12:07:50,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742408_1584 (size=5102) 2024-12-05T12:07:50,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742408_1584 (size=5102) 2024-12-05T12:07:50,653 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742408_1584 (size=5102) 2024-12-05T12:07:50,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742409_1585 (size=5906) 2024-12-05T12:07:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742409_1585 (size=5906) 2024-12-05T12:07:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742409_1585 (size=5906) 2024-12-05T12:07:50,806 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000005/launch_container.sh] 2024-12-05T12:07:50,806 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000005/container_tokens] 2024-12-05T12:07:50,806 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000005/sysfs] 2024-12-05T12:07:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742406_1582 (size=31733) 2024-12-05T12:07:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742406_1582 (size=31733) 2024-12-05T12:07:50,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742406_1582 (size=31733) 2024-12-05T12:07:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742410_1586 (size=463) 2024-12-05T12:07:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742410_1586 (size=463) 2024-12-05T12:07:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742410_1586 (size=463) 2024-12-05T12:07:50,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742411_1587 (size=31733) 2024-12-05T12:07:50,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742411_1587 (size=31733) 2024-12-05T12:07:50,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742411_1587 (size=31733) 2024-12-05T12:07:50,924 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742412_1588 (size=349694) 2024-12-05T12:07:50,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742412_1588 (size=349694) 2024-12-05T12:07:50,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742412_1588 (size=349694) 2024-12-05T12:07:50,938 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:36008 2024-12-05T12:07:50,947 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:45232 2024-12-05T12:07:50,954 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733400137807_0010_01_000004 is : 143 2024-12-05T12:07:50,965 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000004/launch_container.sh] 2024-12-05T12:07:50,965 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000004/container_tokens] 2024-12-05T12:07:50,965 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_0/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000004/sysfs] 2024-12-05T12:07:52,807 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:07:52,808 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-05T12:07:52,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-05T12:07:52,813 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:07:52,813 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:07:52,813 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T12:07:52,814 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-05T12:07:52,814 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-05T12:07:52,814 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-05T12:07:52,814 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-05T12:07:52,814 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400451450/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-05T12:07:52,819 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-05T12:07:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-05T12:07:52,822 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400472821"}]},"ts":"1733400472821"} 2024-12-05T12:07:52,823 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-05T12:07:52,823 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-05T12:07:52,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-05T12:07:52,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, UNASSIGN}] 2024-12-05T12:07:52,825 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, UNASSIGN 2024-12-05T12:07:52,825 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, UNASSIGN 2024-12-05T12:07:52,826 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=5c1512f0bac1eb033bb472e2b4a64235, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:07:52,826 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=CLOSING, regionLocation=80666a11a09f,37629,1733400130958 2024-12-05T12:07:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, UNASSIGN because future has completed 2024-12-05T12:07:52,828 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:07:52,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:07:52,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, UNASSIGN because future has completed 2024-12-05T12:07:52,829 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:07:52,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958}] 2024-12-05T12:07:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-05T12:07:52,982 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] 
handler.UnassignRegionHandler(122): Close ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:52,982 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(122): Close 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing 5c1512f0bac1eb033bb472e2b4a64235, disabling compactions & flushes 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing ac3d10a1d44d11ee31615fff4b4a5947, disabling compactions & flushes 2024-12-05T12:07:52,982 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:07:52,982 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. after waiting 0 ms 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. after waiting 0 ms 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:07:52,982 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:07:52,989 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:07:52,990 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-12-05T12:07:52,990 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:07:52,990 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235. 2024-12-05T12:07:52,990 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:07:52,990 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for 5c1512f0bac1eb033bb472e2b4a64235: Waiting for close lock at 1733400472982Running coprocessor pre-close hooks at 1733400472982Disabling compacts and flushes for region at 1733400472982Disabling writes for close at 1733400472982Writing region close event to WAL at 1733400472984 (+2 ms)Running coprocessor post-close hooks at 1733400472990 (+6 ms)Closed at 1733400472990 2024-12-05T12:07:52,990 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947. 
2024-12-05T12:07:52,990 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for ac3d10a1d44d11ee31615fff4b4a5947: Waiting for close lock at 1733400472982Running coprocessor pre-close hooks at 1733400472982Disabling compacts and flushes for region at 1733400472982Disabling writes for close at 1733400472982Writing region close event to WAL at 1733400472985 (+3 ms)Running coprocessor post-close hooks at 1733400472990 (+5 ms)Closed at 1733400472990 2024-12-05T12:07:52,993 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed 5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:07:52,993 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=5c1512f0bac1eb033bb472e2b4a64235, regionState=CLOSED 2024-12-05T12:07:52,994 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:52,994 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=ac3d10a1d44d11ee31615fff4b4a5947, regionState=CLOSED 2024-12-05T12:07:52,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:07:52,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958 because future has completed 2024-12-05T12:07:52,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-05T12:07:52,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure 5c1512f0bac1eb033bb472e2b4a64235, server=80666a11a09f,42913,1733400130845 in 169 msec 2024-12-05T12:07:53,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=229 2024-12-05T12:07:53,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure ac3d10a1d44d11ee31615fff4b4a5947, server=80666a11a09f,37629,1733400130958 in 168 msec 2024-12-05T12:07:53,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=5c1512f0bac1eb033bb472e2b4a64235, UNASSIGN in 174 msec 2024-12-05T12:07:53,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=228 2024-12-05T12:07:53,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ac3d10a1d44d11ee31615fff4b4a5947, UNASSIGN in 175 msec 2024-12-05T12:07:53,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-12-05T12:07:53,004 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 178 msec 2024-12-05T12:07:53,005 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400473005"}]},"ts":"1733400473005"} 2024-12-05T12:07:53,006 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-05T12:07:53,006 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-05T12:07:53,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 188 msec 2024-12-05T12:07:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-05T12:07:53,139 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T12:07:53,140 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-05T12:07:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,145 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-05T12:07:53,147 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,152 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:53,153 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:07:53,154 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits] 2024-12-05T12:07:53,154 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/recovered.edits] 2024-12-05T12:07:53,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-05T12:07:53,157 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/cf/629570661b704ad0917762179199ef0b 2024-12-05T12:07:53,157 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/cf/fa5423db36394c1ebcdc47a3d35a595f 2024-12-05T12:07:53,159 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits/12.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947/recovered.edits/12.seqid 2024-12-05T12:07:53,160 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235/recovered.edits/9.seqid 2024-12-05T12:07:53,160 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:53,160 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportWithChecksum/5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:07:53,160 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-05T12:07:53,160 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-05T12:07:53,161 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-05T12:07:53,163 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241205f71fd37660314339932a04a4dcbd7e8b_5c1512f0bac1eb033bb472e2b4a64235 2024-12-05T12:07:53,163 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024120570882894d19a49e8abd31fe832b76c4a_ac3d10a1d44d11ee31615fff4b4a5947 2024-12-05T12:07:53,164 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-05T12:07:53,165 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,167 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-05T12:07:53,169 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-05T12:07:53,170 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,170 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 
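The teardown recorded above (DisableTableProcedure pid=227, region closes, HFile archiving, then DeleteTableProcedure pid=233 and the snapshot deletions that follow a few entries later) corresponds to the cleanup a client would drive through the HBase Admin API. A minimal sketch of that sequence, assuming a Connection to this mini cluster is already available; the class name and configuration handling here are illustrative, not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExportWithChecksumTeardownSketch {
      public static void main(String[] args) throws Exception {
        // In the test this configuration would point at the mini cluster; here it is just a placeholder.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // DisableTableProcedure: regions are unassigned and closed on the region servers.
          admin.disableTable(table);
          // DeleteTableProcedure: region directories are archived and rows removed from hbase:meta.
          admin.deleteTable(table);
          // Snapshot cleanup, matching the master's "delete name: ..." entries later in the log.
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }
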
2024-12-05T12:07:53,170 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400473170"}]},"ts":"9223372036854775807"} 2024-12-05T12:07:53,170 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400473170"}]},"ts":"9223372036854775807"} 2024-12-05T12:07:53,172 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:07:53,172 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ac3d10a1d44d11ee31615fff4b4a5947, NAME => 'testtb-testExportWithChecksum,,1733400409749.ac3d10a1d44d11ee31615fff4b4a5947.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5c1512f0bac1eb033bb472e2b4a64235, NAME => 'testtb-testExportWithChecksum,1,1733400409749.5c1512f0bac1eb033bb472e2b4a64235.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:07:53,172 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-05T12:07:53,172 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400473172"}]},"ts":"9223372036854775807"} 2024-12-05T12:07:53,174 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-05T12:07:53,174 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-05T12:07:53,175 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 34 msec 2024-12-05T12:07:53,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,201 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T12:07:53,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithChecksum with data PBUF 2024-12-05T12:07:53,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T12:07:53,202 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,209 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-05T12:07:53,209 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-12-05T12:07:53,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 
\x04 2024-12-05T12:07:53,210 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,211 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-05T12:07:53,211 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-05T12:07:53,216 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-05T12:07:53,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-05T12:07:53,218 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-05T12:07:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-05T12:07:53,236 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=823 (was 822) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:40203 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:39420 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40203 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:52670 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1621142049_1 at /127.0.0.1:43022 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 161720) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9057 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=818 (was 833), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=862 (was 638) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1885 (was 2270) 2024-12-05T12:07:53,236 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=823 is superior to 500 2024-12-05T12:07:53,252 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=823, OpenFileDescriptor=818, MaxFileDescriptor=1048576, SystemLoadAverage=862, ProcessCount=21, AvailableMemoryMB=1885 2024-12-05T12:07:53,252 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=823 is superior to 500 2024-12-05T12:07:53,254 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-05T12:07:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:53,255 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-05T12:07:53,256 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-12-05T12:07:53,256 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 
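The CreateTableProcedure started above is driven by the table descriptor printed in the master's create request: a single MOB-enabled 'cf' family with MOB_THRESHOLD => '0' and VERSIONS => '1', and two regions split at '1'. A rough equivalent built with the public descriptor builders is sketched below; it is an approximation of that descriptor (default values are left untouched), not the test's actual code, and it assumes an Admin handle like the one in the teardown sketch earlier:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateSkipTmpTableSketch {
      public static void createTable(Admin admin) throws IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)    // IS_MOB => 'true'
            .setMobThreshold(0L)    // MOB_THRESHOLD => '0': every cell is stored as a MOB
            .setMaxVersions(1)      // VERSIONS => '1'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setColumnFamily(cf)
            .build();
        // One split key '1' yields the two regions ('' - '1' and '1' - '') created in the log that follows.
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
      }
    }
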
2024-12-05T12:07:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-05T12:07:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742413_1589 (size=454) 2024-12-05T12:07:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742413_1589 (size=454) 2024-12-05T12:07:53,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742413_1589 (size=454) 2024-12-05T12:07:53,264 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e14a8686422d6ba50902d3f46d1fd225, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:53,265 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a96226560936aca69cb372bbbf3e7979, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:53,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742415_1591 (size=79) 2024-12-05T12:07:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742414_1590 (size=79) 2024-12-05T12:07:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742414_1590 (size=79) 2024-12-05T12:07:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742415_1591 (size=79) 2024-12-05T12:07:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742415_1591 (size=79) 2024-12-05T12:07:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39603 is added to blk_1073742414_1590 (size=79) 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing a96226560936aca69cb372bbbf3e7979, disabling compactions & flushes 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing e14a8686422d6ba50902d3f46d1fd225, disabling compactions & flushes 2024-12-05T12:07:53,273 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:53,273 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. after waiting 0 ms 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. after waiting 0 ms 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,273 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
2024-12-05T12:07:53,273 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for a96226560936aca69cb372bbbf3e7979: Waiting for close lock at 1733400473273Disabling compacts and flushes for region at 1733400473273Disabling writes for close at 1733400473273Writing region close event to WAL at 1733400473273Closed at 1733400473273 2024-12-05T12:07:53,273 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for e14a8686422d6ba50902d3f46d1fd225: Waiting for close lock at 1733400473273Disabling compacts and flushes for region at 1733400473273Disabling writes for close at 1733400473273Writing region close event to WAL at 1733400473273Closed at 1733400473273 2024-12-05T12:07:53,274 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-05T12:07:53,275 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733400473274"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400473274"}]},"ts":"1733400473274"} 2024-12-05T12:07:53,275 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733400473274"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733400473274"}]},"ts":"1733400473274"} 2024-12-05T12:07:53,277 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
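With the regioninfo and state cells written, hbase:meta now carries both regions ("Added 2 regions to meta"). A client does not scan meta directly for this; the Admin API exposes the same information. A minimal sketch, assuming the same cluster configuration as the previous example:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListTableRegions {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions =
              admin.getRegions(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
          for (RegionInfo ri : regions) {
            // Prints the encoded names and key ranges that also appear in the meta Puts above.
            System.out.println(ri.getEncodedName() + " ["
                + Bytes.toStringBinary(ri.getStartKey()) + ", "
                + Bytes.toStringBinary(ri.getEndKey()) + ")");
          }
        }
      }
    }
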
2024-12-05T12:07:53,278 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-05T12:07:53,278 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400473278"}]},"ts":"1733400473278"} 2024-12-05T12:07:53,279 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-05T12:07:53,280 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {80666a11a09f=0} racks are {/default-rack=0} 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-05T12:07:53,281 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-05T12:07:53,281 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-05T12:07:53,281 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-05T12:07:53,281 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-05T12:07:53,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, ASSIGN}] 2024-12-05T12:07:53,282 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, ASSIGN 2024-12-05T12:07:53,282 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, ASSIGN 2024-12-05T12:07:53,283 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, ASSIGN; state=OFFLINE, location=80666a11a09f,42913,1733400130845; forceNewPlan=false, retain=false 2024-12-05T12:07:53,283 INFO 
[PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, ASSIGN; state=OFFLINE, location=80666a11a09f,42305,1733400131012; forceNewPlan=false, retain=false 2024-12-05T12:07:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-05T12:07:53,434 INFO [80666a11a09f:34947 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-05T12:07:53,435 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=a96226560936aca69cb372bbbf3e7979, regionState=OPENING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:07:53,435 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=e14a8686422d6ba50902d3f46d1fd225, regionState=OPENING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:07:53,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, ASSIGN because future has completed 2024-12-05T12:07:53,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:07:53,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, ASSIGN because future has completed 2024-12-05T12:07:53,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:07:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-05T12:07:53,600 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,600 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
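At this point the two TransitRegionStateProcedures (pid=235/236) have chosen targets and dispatched OpenRegionProcedures to 80666a11a09f,42305 and 80666a11a09f,42913. From the client side, the equivalent check is to wait until the table is available and then ask the RegionLocator where each region landed; the loop below is a sketch under that assumption, not the test's own wait logic.

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class WaitForAssignment {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          while (!admin.isTableAvailable(tn)) {   // true once every region is open
            Thread.sleep(100);
          }
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
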
2024-12-05T12:07:53,600 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => e14a8686422d6ba50902d3f46d1fd225, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.', STARTKEY => '', ENDKEY => '1'} 2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => a96226560936aca69cb372bbbf3e7979, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.', STARTKEY => '1', ENDKEY => ''} 2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. service=AccessControlService 2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. service=AccessControlService 2024-12-05T12:07:53,601 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,601 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
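Each region open registers AccessControlService, i.e. the AccessController coprocessor is active in this mini cluster. Outside the test harness that coprocessor is normally enabled through configuration; the keys below are the documented ones and are shown only as a reminder of what the log implies about the cluster's setup.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecurityConf {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        // The same class is loaded at master, regionserver and region level.
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }
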
2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:07:53,601 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,602 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,602 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,602 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-05T12:07:53,602 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,602 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,604 INFO [StoreOpener-e14a8686422d6ba50902d3f46d1fd225-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,604 INFO [StoreOpener-a96226560936aca69cb372bbbf3e7979-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,606 INFO [StoreOpener-a96226560936aca69cb372bbbf3e7979-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a96226560936aca69cb372bbbf3e7979 columnFamilyName cf 2024-12-05T12:07:53,606 INFO [StoreOpener-e14a8686422d6ba50902d3f46d1fd225-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e14a8686422d6ba50902d3f46d1fd225 columnFamilyName cf 2024-12-05T12:07:53,607 DEBUG [StoreOpener-a96226560936aca69cb372bbbf3e7979-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:53,607 DEBUG [StoreOpener-e14a8686422d6ba50902d3f46d1fd225-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:53,607 INFO [StoreOpener-e14a8686422d6ba50902d3f46d1fd225-1 {}] regionserver.HStore(327): Store=e14a8686422d6ba50902d3f46d1fd225/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:07:53,607 INFO [StoreOpener-a96226560936aca69cb372bbbf3e7979-1 {}] regionserver.HStore(327): Store=a96226560936aca69cb372bbbf3e7979/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-05T12:07:53,607 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,607 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,608 DEBUG 
[RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,608 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,609 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,609 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,610 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:07:53,610 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-05T12:07:53,611 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened a96226560936aca69cb372bbbf3e7979; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66335696, jitterRate=-0.011521100997924805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:07:53,611 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened e14a8686422d6ba50902d3f46d1fd225; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73249003, jitterRate=0.09149520099163055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-05T12:07:53,611 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,611 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:53,611 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for a96226560936aca69cb372bbbf3e7979: Running coprocessor pre-open hook at 1733400473602Writing region info on filesystem at 1733400473602Initializing all the 
Stores at 1733400473603 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400473603Cleaning up temporary data from old regions at 1733400473608 (+5 ms)Running coprocessor post-open hooks at 1733400473611 (+3 ms)Region opened successfully at 1733400473611 2024-12-05T12:07:53,611 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for e14a8686422d6ba50902d3f46d1fd225: Running coprocessor pre-open hook at 1733400473602Writing region info on filesystem at 1733400473602Initializing all the Stores at 1733400473603 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733400473603Cleaning up temporary data from old regions at 1733400473608 (+5 ms)Running coprocessor post-open hooks at 1733400473611 (+3 ms)Region opened successfully at 1733400473611 2024-12-05T12:07:53,612 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979., pid=238, masterSystemTime=1733400473594 2024-12-05T12:07:53,612 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225., pid=237, masterSystemTime=1733400473593 2024-12-05T12:07:53,614 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:53,614 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:53,614 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=a96226560936aca69cb372bbbf3e7979, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:07:53,614 DEBUG [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,614 INFO [RS_OPEN_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 
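Both regions report "Region opened successfully" with next sequenceid=2, so the table is now serving. A quick way to confirm that from a client is a round-trip write and read against the new 'cf' family; the row key and value below are made up for the example.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmokeTestPutGet {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn)) {
          table.put(new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          Result r = table.get(new Get(Bytes.toBytes("row-0")));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
        }
      }
    }
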
2024-12-05T12:07:53,615 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=e14a8686422d6ba50902d3f46d1fd225, regionState=OPEN, openSeqNum=2, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:07:53,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:07:53,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:07:53,617 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-12-05T12:07:53,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845 in 175 msec 2024-12-05T12:07:53,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, ASSIGN in 337 msec 2024-12-05T12:07:53,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=235 2024-12-05T12:07:53,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012 in 178 msec 2024-12-05T12:07:53,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=234 2024-12-05T12:07:53,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, ASSIGN in 338 msec 2024-12-05T12:07:53,620 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-05T12:07:53,620 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400473620"}]},"ts":"1733400473620"} 2024-12-05T12:07:53,622 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-05T12:07:53,622 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-05T12:07:53,622 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-05T12:07:53,625 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T12:07:53,634 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:07:53,643 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,643 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,643 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,643 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,644 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,644 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,644 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-05T12:07:53,644 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 
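The CREATE_TABLE_POST_OPERATION step writes an ACL entry "jenkins: RWXCA" for the new table, and the ZKPermissionWatcher on each server then refreshes its cache from the /hbase/acl znode. The same kind of grant can be issued explicitly with AccessControlClient; the sketch below assumes the AccessController setup shown earlier and is not how the test itself ends up with this permission.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      // AccessControlClient.grant is declared to throw Throwable.
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              "jenkins",        // user
              null, null,       // whole table: no family/qualifier restriction
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);   // i.e. RWXCA
        }
      }
    }
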
2024-12-05T12:07:53,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 389 msec 2024-12-05T12:07:53,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-12-05T12:07:53,887 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T12:07:53,888 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:53,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:53,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:53,890 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:07:53,891 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:53,896 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:53,901 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:53,903 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T12:07:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400473903 (current time:1733400473903). 
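The CreateTableProcedure finishes in 389 msec, and the client immediately requests snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp with type=FLUSH and ttl=0. Through the public API the equivalent call is Admin.snapshot; a minimal sketch, assuming the table created above exists:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH snapshots flush memstores first; the table is still empty here,
          // which is why the test names it "emptySnaptb0-...".
          admin.snapshot(new SnapshotDescription(
              "emptySnaptb0-testExportFileSystemStateWithSkipTmp", tn, SnapshotType.FLUSH));
        }
      }
    }
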
2024-12-05T12:07:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:07:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-05T12:07:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:07:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65e7f8d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:07:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:07:53,905 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:07:53,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:07:53,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:07:53,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73127840, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:07:53,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:07:53,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,907 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:07:53,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4321b22c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:07:53,908 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:07:53,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:53,909 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:07:53,910 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:07:53,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:07:53,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,910 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
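The validation path in SnapshotDescriptionUtils opens and closes a short-lived connection of its own, which is why the log shows a ClusterIdFetcher round trip, a meta region location lookup, and then the "Connection has been closed" call stack. The same registry information is reachable from an ordinary async client; the sketch below only illustrates that and assumes default configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ShowClusterId {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          AsyncAdmin admin = conn.getAdmin();
          ClusterMetrics metrics = admin.getClusterMetrics().get();
          // Corresponds to the cluster_id the ClusterIdFetcher logs above.
          System.out.println("cluster id: " + metrics.getClusterId());
        }
      }
    }
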
2024-12-05T12:07:53,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40586ab1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:07:53,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:07:53,911 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:07:53,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:07:53,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:07:53,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ad18f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:07:53,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:07:53,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,913 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:07:53,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12371161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:53,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:07:53,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:07:53,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:53,915 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-05T12:07:53,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 2024-12-05T12:07:53,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:53,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:07:53,918 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:07:53,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:07:53,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:53,919 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-05T12:07:53,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T12:07:53,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:07:53,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T12:07:53,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-05T12:07:53,922 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:07:53,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-05T12:07:53,922 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:07:53,924 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:07:53,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742416_1592 (size=203) 2024-12-05T12:07:53,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742416_1592 (size=203) 2024-12-05T12:07:53,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742416_1592 (size=203) 2024-12-05T12:07:53,935 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:07:53,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979}] 2024-12-05T12:07:53,936 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:53,936 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-05T12:07:54,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-05T12:07:54,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-05T12:07:54,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:54,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for e14a8686422d6ba50902d3f46d1fd225: 2024-12-05T12:07:54,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
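SnapshotRegionProcedures 240 and 241 run on the two regionservers; because the regions are empty there is nothing to flush and each manifest references zero hfiles. Once the parent procedure completes, the snapshot becomes visible to clients; a sketch of checking for it, assuming the snapshot name used above:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          for (SnapshotDescription s : snapshots) {
            if (s.getName().startsWith("emptySnaptb0-")) {
              System.out.println(s.getName() + " on " + s.getTableName());
            }
          }
        }
      }
    }
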
2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for a96226560936aca69cb372bbbf3e7979: 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:07:54,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-05T12:07:54,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742417_1593 (size=82) 2024-12-05T12:07:54,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742417_1593 (size=82) 2024-12-05T12:07:54,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742417_1593 (size=82) 2024-12-05T12:07:54,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 
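The snapshot being taken here is what the surrounding test (testExportFileSystemStateWithSkipTmp) later exports to another filesystem. Exports are normally driven by the ExportSnapshot tool; in the sketch below the destination URI is invented for the example, and "snapshot.export.skiptmp" is an assumption about the switch the test name refers to, so verify it against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed name of the "skip tmp dir" switch; check ExportSnapshot in your release.
        conf.setBoolean("snapshot.export.skiptmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://backup-nn:8020/hbase"   // hypothetical target root dir
        });
        System.exit(rc);
      }
    }
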
2024-12-05T12:07:54,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-05T12:07:54,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742418_1594 (size=82) 2024-12-05T12:07:54,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-05T12:07:54,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,123 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742418_1594 (size=82) 2024-12-05T12:07:54,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742418_1594 (size=82) 2024-12-05T12:07:54,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:07:54,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-05T12:07:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-05T12:07:54,124 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,124 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 in 189 msec 2024-12-05T12:07:54,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-05T12:07:54,127 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 in 189 msec 2024-12-05T12:07:54,127 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:07:54,127 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:07:54,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-05T12:07:54,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:07:54,128 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:54,129 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-05T12:07:54,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742419_1595 (size=74) 2024-12-05T12:07:54,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742419_1595 (size=74) 2024-12-05T12:07:54,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742419_1595 (size=74) 2024-12-05T12:07:54,136 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:07:54,136 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,137 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742420_1596 (size=697) 2024-12-05T12:07:54,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742420_1596 (size=697) 2024-12-05T12:07:54,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742420_1596 (size=697) 2024-12-05T12:07:54,146 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:07:54,150 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:07:54,151 DEBUG [PEWorker-1 {}] 
snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,153 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:07:54,153 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-05T12:07:54,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 233 msec 2024-12-05T12:07:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-05T12:07:54,238 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T12:07:54,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42305 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:07:54,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42913 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. with WAL disabled. Data may be lost in the event of a crash. 2024-12-05T12:07:54,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:54,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,248 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 
2024-12-05T12:07:54,249 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-05T12:07:54,251 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:54,256 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:54,261 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-05T12:07:54,263 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T12:07:54,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733400474263 (current time:1733400474263). 2024-12-05T12:07:54,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-05T12:07:54,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-05T12:07:54,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-05T12:07:54,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2843670a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:07:54,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:07:54,265 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:07:54,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:07:54,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:07:54,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f4cfcf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:07:54,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:07:54,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,267 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:07:54,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31995341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:07:54,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:07:54,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:54,270 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39570, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:07:54,271 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 
2024-12-05T12:07:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:07:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c188461, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,272 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-05T12:07:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ClusterIdFetcher(90): Going to request 80666a11a09f,34947,-1 for getting cluster id 2024-12-05T12:07:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-05T12:07:54,273 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'af3bc8f5-5d92-4279-9ca6-4b73c59b8f54' 2024-12-05T12:07:54,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-05T12:07:54,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "af3bc8f5-5d92-4279-9ca6-4b73c59b8f54" 2024-12-05T12:07:54,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff5456, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [80666a11a09f,34947,-1] 2024-12-05T12:07:54,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-05T12:07:54,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,275 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-05T12:07:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b43832b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-05T12:07:54,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-05T12:07:54,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=80666a11a09f,37629,1733400130958, seqNum=-1] 2024-12-05T12:07:54,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:54,277 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39584, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:07:54,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., hostname=80666a11a09f,42305,1733400131012, seqNum=2] 
2024-12-05T12:07:54,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-05T12:07:54,281 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-05T12:07:54,282 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947. 2024-12-05T12:07:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor153.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-05T12:07:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:07:54,283 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry 
end points refresher loop exited. 2024-12-05T12:07:54,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-05T12:07:54,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-05T12:07:54,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-05T12:07:54,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-05T12:07:54,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T12:07:54,286 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-05T12:07:54,287 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-05T12:07:54,289 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-05T12:07:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742421_1597 (size=198) 2024-12-05T12:07:54,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742421_1597 (size=198) 2024-12-05T12:07:54,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742421_1597 (size=198) 2024-12-05T12:07:54,296 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-05T12:07:54,296 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225}, {pid=244, ppid=242, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979}] 2024-12-05T12:07:54,297 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,297 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T12:07:54,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42913 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-05T12:07:54,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42305 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-05T12:07:54,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:07:54,449 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing e14a8686422d6ba50902d3f46d1fd225 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-05T12:07:54,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
2024-12-05T12:07:54,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing a96226560936aca69cb372bbbf3e7979 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-05T12:07:54,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 is 71, key is 17932ebd9ceda023bb6ce650d4573882/cf:q/1733400474244/Put/seqid=0 2024-12-05T12:07:54,475 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 is 71, key is 046e8b33fa96346cdd1804e1aa7e073e/cf:q/1733400474243/Put/seqid=0 2024-12-05T12:07:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742423_1599 (size=5032) 2024-12-05T12:07:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742423_1599 (size=5032) 2024-12-05T12:07:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742423_1599 (size=5032) 2024-12-05T12:07:54,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:54,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742422_1598 (size=8241) 2024-12-05T12:07:54,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742422_1598 (size=8241) 2024-12-05T12:07:54,501 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742422_1598 (size=8241) 2024-12-05T12:07:54,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:54,503 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/.tmp/cf/9964ae11cd484194847a957cc3b8ca20, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=e14a8686422d6ba50902d3f46d1fd225] 2024-12-05T12:07:54,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/.tmp/cf/9964ae11cd484194847a957cc3b8ca20 is 220, key is 0817e3436236f530669f5bc6d2a5510e8/cf:q/1733400474243/Put/seqid=0 2024-12-05T12:07:54,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742424_1600 (size=5742) 2024-12-05T12:07:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742424_1600 (size=5742) 2024-12-05T12:07:54,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742424_1600 (size=5742) 2024-12-05T12:07:54,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/.tmp/cf/309759b2cbf24b5aa5ec1d4a73736ede, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=a96226560936aca69cb372bbbf3e7979] 2024-12-05T12:07:54,515 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/.tmp/cf/9964ae11cd484194847a957cc3b8ca20 2024-12-05T12:07:54,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/.tmp/cf/309759b2cbf24b5aa5ec1d4a73736ede is 220, key is 
1d9499dcefe25b2d5e5649ec037bb5e62/cf:q/1733400474244/Put/seqid=0 2024-12-05T12:07:54,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/.tmp/cf/9964ae11cd484194847a957cc3b8ca20 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20 2024-12-05T12:07:54,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20, entries=2, sequenceid=6, filesize=5.6 K 2024-12-05T12:07:54,546 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for e14a8686422d6ba50902d3f46d1fd225 in 97ms, sequenceid=6, compaction requested=false 2024-12-05T12:07:54,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-05T12:07:54,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for e14a8686422d6ba50902d3f46d1fd225: 2024-12-05T12:07:54,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T12:07:54,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:07:54,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20] hfiles 2024-12-05T12:07:54,547 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742425_1601 (size=15741) 2024-12-05T12:07:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742425_1601 (size=15741) 2024-12-05T12:07:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742425_1601 (size=15741) 2024-12-05T12:07:54,566 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/.tmp/cf/309759b2cbf24b5aa5ec1d4a73736ede 2024-12-05T12:07:54,573 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/.tmp/cf/309759b2cbf24b5aa5ec1d4a73736ede as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede 2024-12-05T12:07:54,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede, entries=48, sequenceid=6, filesize=15.4 K 2024-12-05T12:07:54,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for a96226560936aca69cb372bbbf3e7979 in 129ms, sequenceid=6, compaction requested=false 2024-12-05T12:07:54,579 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for a96226560936aca69cb372bbbf3e7979: 2024-12-05T12:07:54,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-05T12:07:54,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-05T12:07:54,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede] hfiles 2024-12-05T12:07:54,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742426_1602 (size=121) 2024-12-05T12:07:54,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742426_1602 (size=121) 2024-12-05T12:07:54,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742426_1602 (size=121) 2024-12-05T12:07:54,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 
2024-12-05T12:07:54,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-05T12:07:54,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-05T12:07:54,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,608 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T12:07:54,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e14a8686422d6ba50902d3f46d1fd225 in 313 msec 2024-12-05T12:07:54,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742427_1603 (size=121) 2024-12-05T12:07:54,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742427_1603 (size=121) 2024-12-05T12:07:54,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742427_1603 (size=121) 2024-12-05T12:07:54,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
2024-12-05T12:07:54,625 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/80666a11a09f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-05T12:07:54,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-05T12:07:54,625 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,625 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-12-05T12:07:54,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a96226560936aca69cb372bbbf3e7979 in 330 msec 2024-12-05T12:07:54,630 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-05T12:07:54,639 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-05T12:07:54,640 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-05T12:07:54,640 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-05T12:07:54,640 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-05T12:07:54,642 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225] hfiles 2024-12-05T12:07:54,642 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 2024-12-05T12:07:54,642 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:07:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742428_1604 (size=305) 2024-12-05T12:07:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742428_1604 (size=305) 2024-12-05T12:07:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742428_1604 (size=305) 2024-12-05T12:07:54,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-05T12:07:54,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,676 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742429_1605 (size=1007) 2024-12-05T12:07:54,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added 
to blk_1073742429_1605 (size=1007) 2024-12-05T12:07:54,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742429_1605 (size=1007) 2024-12-05T12:07:54,696 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-05T12:07:54,703 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-05T12:07:54,704 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,706 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-05T12:07:54,706 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-05T12:07:54,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 422 msec 2024-12-05T12:07:54,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-05T12:07:54,918 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T12:07:54,918 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918 2024-12-05T12:07:54,918 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37541, tgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918, rawTgtDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918, srcFsUri=hdfs://localhost:37541, srcDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:54,956 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37541, inputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300 2024-12-05T12:07:54,956 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:54,958 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-05T12:07:54,976 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:07:55,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742430_1606 (size=198) 2024-12-05T12:07:55,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742430_1606 (size=198) 2024-12-05T12:07:55,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742430_1606 (size=198) 2024-12-05T12:07:55,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742431_1607 (size=1007) 2024-12-05T12:07:55,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742431_1607 (size=1007) 2024-12-05T12:07:55,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742431_1607 (size=1007) 2024-12-05T12:07:55,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:55,027 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:55,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-17229515955659487551.jar 2024-12-05T12:07:56,180 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop-2883415996245947916.jar 2024-12-05T12:07:56,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-05T12:07:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-05T12:07:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-05T12:07:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
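Aside: the long run of "For class ..., using jar ..." DEBUG lines here is ExportSnapshot preparing its MapReduce job: for each class the map tasks will need, TableMapReduceUtil locates the jar that provides it so the jar can be shipped with the job. The export itself is normally launched as "hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>" (this run additionally has skipTmp=true, as recorded a few lines earlier). A minimal sketch of the dependency-jar step, with an assumed job name, looks roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSetupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Job name is illustrative only.
        Job job = Job.getInstance(conf, "export-snapshot-deps");
        // Resolves the jar providing each class the tasks depend on (HBase
        // modules, ZooKeeper, shaded thirdparty, OpenTelemetry, Hadoop) and
        // attaches them to the job -- roughly the call behind the
        // "For class ..., using jar ..." DEBUG lines above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }

This is a sketch of the general mechanism, not the test's exact code path; the surrounding log is the authoritative record of which jars were picked up in this run.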
2024-12-05T12:07:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-05T12:07:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-05T12:07:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-05T12:07:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-05T12:07:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-05T12:07:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-05T12:07:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-05T12:07:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-05T12:07:56,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:56,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:56,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-05T12:07:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:56,267 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-05T12:07:56,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742432_1608 (size=131440) 2024-12-05T12:07:56,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742432_1608 (size=131440) 2024-12-05T12:07:56,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742432_1608 (size=131440) 2024-12-05T12:07:56,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742433_1609 (size=4188619) 2024-12-05T12:07:56,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742433_1609 (size=4188619) 2024-12-05T12:07:56,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742433_1609 (size=4188619) 2024-12-05T12:07:56,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742434_1610 (size=1323991) 2024-12-05T12:07:56,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742434_1610 (size=1323991) 2024-12-05T12:07:56,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742434_1610 (size=1323991) 2024-12-05T12:07:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742435_1611 (size=903934) 2024-12-05T12:07:56,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742435_1611 (size=903934) 2024-12-05T12:07:56,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742435_1611 (size=903934) 2024-12-05T12:07:56,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742436_1612 (size=8360360) 2024-12-05T12:07:56,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added 
to blk_1073742436_1612 (size=8360360) 2024-12-05T12:07:56,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742436_1612 (size=8360360) 2024-12-05T12:07:56,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742437_1613 (size=1877034) 2024-12-05T12:07:56,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742437_1613 (size=1877034) 2024-12-05T12:07:56,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742437_1613 (size=1877034) 2024-12-05T12:07:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742438_1614 (size=77835) 2024-12-05T12:07:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742438_1614 (size=77835) 2024-12-05T12:07:56,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742438_1614 (size=77835) 2024-12-05T12:07:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742439_1615 (size=30949) 2024-12-05T12:07:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742439_1615 (size=30949) 2024-12-05T12:07:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742439_1615 (size=30949) 2024-12-05T12:07:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742440_1616 (size=1597213) 2024-12-05T12:07:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742440_1616 (size=1597213) 2024-12-05T12:07:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742440_1616 (size=1597213) 2024-12-05T12:07:56,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742441_1617 (size=4695811) 2024-12-05T12:07:56,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742441_1617 (size=4695811) 2024-12-05T12:07:56,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742441_1617 (size=4695811) 2024-12-05T12:07:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742442_1618 (size=232957) 2024-12-05T12:07:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742442_1618 (size=232957) 2024-12-05T12:07:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742442_1618 (size=232957) 2024-12-05T12:07:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37713 is added to blk_1073742443_1619 (size=127628) 2024-12-05T12:07:56,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742443_1619 (size=127628) 2024-12-05T12:07:56,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742443_1619 (size=127628) 2024-12-05T12:07:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742444_1620 (size=20406) 2024-12-05T12:07:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742444_1620 (size=20406) 2024-12-05T12:07:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742444_1620 (size=20406) 2024-12-05T12:07:56,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742445_1621 (size=5175431) 2024-12-05T12:07:56,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742445_1621 (size=5175431) 2024-12-05T12:07:56,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742445_1621 (size=5175431) 2024-12-05T12:07:56,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742446_1622 (size=217634) 2024-12-05T12:07:56,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742446_1622 (size=217634) 2024-12-05T12:07:56,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742446_1622 (size=217634) 2024-12-05T12:07:56,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742447_1623 (size=6425023) 2024-12-05T12:07:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742447_1623 (size=6425023) 2024-12-05T12:07:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742447_1623 (size=6425023) 2024-12-05T12:07:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742448_1624 (size=1832290) 2024-12-05T12:07:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742448_1624 (size=1832290) 2024-12-05T12:07:56,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742448_1624 (size=1832290) 2024-12-05T12:07:56,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742449_1625 (size=322274) 2024-12-05T12:07:56,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742449_1625 (size=322274) 2024-12-05T12:07:56,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39603 is added to blk_1073742449_1625 (size=322274) 2024-12-05T12:07:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742450_1626 (size=503880) 2024-12-05T12:07:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742450_1626 (size=503880) 2024-12-05T12:07:56,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742450_1626 (size=503880) 2024-12-05T12:07:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742451_1627 (size=29229) 2024-12-05T12:07:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742451_1627 (size=29229) 2024-12-05T12:07:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742451_1627 (size=29229) 2024-12-05T12:07:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742452_1628 (size=24096) 2024-12-05T12:07:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742452_1628 (size=24096) 2024-12-05T12:07:56,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742452_1628 (size=24096) 2024-12-05T12:07:56,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742453_1629 (size=443172) 2024-12-05T12:07:56,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742453_1629 (size=443172) 2024-12-05T12:07:56,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742453_1629 (size=443172) 2024-12-05T12:07:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742454_1630 (size=111872) 2024-12-05T12:07:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742454_1630 (size=111872) 2024-12-05T12:07:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742454_1630 (size=111872) 2024-12-05T12:07:56,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742455_1631 (size=45609) 2024-12-05T12:07:56,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742455_1631 (size=45609) 2024-12-05T12:07:56,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742455_1631 (size=45609) 2024-12-05T12:07:56,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742456_1632 (size=136454) 2024-12-05T12:07:56,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39603 is added to blk_1073742456_1632 (size=136454) 2024-12-05T12:07:56,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742456_1632 (size=136454) 2024-12-05T12:07:56,902 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-05T12:07:56,904 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-05T12:07:56,906 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.4 K 2024-12-05T12:07:56,906 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-05T12:07:56,906 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-12-05T12:07:56,906 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-05T12:07:56,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742457_1633 (size=1079) 2024-12-05T12:07:56,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742457_1633 (size=1079) 2024-12-05T12:07:56,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742457_1633 (size=1079) 2024-12-05T12:07:56,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742458_1634 (size=35) 2024-12-05T12:07:56,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742458_1634 (size=35) 2024-12-05T12:07:56,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742458_1634 (size=35) 2024-12-05T12:07:56,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742459_1635 (size=304168) 2024-12-05T12:07:56,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742459_1635 (size=304168) 2024-12-05T12:07:56,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742459_1635 (size=304168) 2024-12-05T12:07:57,125 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-05T12:07:57,125 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-05T12:07:57,141 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0010_000001 (auth:SIMPLE) from 127.0.0.1:36010 2024-12-05T12:07:57,172 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000001/launch_container.sh] 2024-12-05T12:07:57,172 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000001/container_tokens] 2024-12-05T12:07:57,172 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_3/usercache/jenkins/appcache/application_1733400137807_0010/container_1733400137807_0010_01_000001/sysfs] 2024-12-05T12:07:57,354 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e2d55a422a8d6cd55d6d7cbfae79e377, had cached 0 bytes from a total of 14661 2024-12-05T12:07:57,354 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d819fc7dca4529788a4ca3a58e3fe9e5, had cached 0 bytes from a total of 5888 2024-12-05T12:07:57,798 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:45234 2024-12-05T12:07:58,513 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:08:00,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:00,361 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-05T12:08:00,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-05T12:08:01,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:33004 2024-12-05T12:08:02,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742460_1636 (size=349890) 2024-12-05T12:08:02,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742460_1636 (size=349890) 
2024-12-05T12:08:02,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742460_1636 (size=349890) 2024-12-05T12:08:04,029 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:52048 2024-12-05T12:08:04,029 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:41550 2024-12-05T12:08:04,921 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:41564 2024-12-05T12:08:04,923 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:52060 2024-12-05T12:08:05,864 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:08:07,127 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733400137807_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-05T12:08:08,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742461_1637 (size=8241) 2024-12-05T12:08:08,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742461_1637 (size=8241) 2024-12-05T12:08:08,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742461_1637 (size=8241) 2024-12-05T12:08:08,983 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T12:08:09,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742463_1639 (size=15741) 2024-12-05T12:08:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742463_1639 (size=15741) 2024-12-05T12:08:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742463_1639 (size=15741) 2024-12-05T12:08:09,317 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000002/launch_container.sh] 2024-12-05T12:08:09,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000002/container_tokens] 2024-12-05T12:08:09,318 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000002/sysfs] 2024-12-05T12:08:09,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742464_1640 (size=5742) 2024-12-05T12:08:09,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742464_1640 (size=5742) 2024-12-05T12:08:09,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742464_1640 (size=5742) 2024-12-05T12:08:09,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742465_1641 (size=5032) 2024-12-05T12:08:09,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742465_1641 (size=5032) 2024-12-05T12:08:09,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742465_1641 (size=5032) 2024-12-05T12:08:10,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742462_1638 (size=31807) 2024-12-05T12:08:10,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742462_1638 (size=31807) 2024-12-05T12:08:10,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742462_1638 (size=31807) 2024-12-05T12:08:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742466_1642 (size=477) 
2024-12-05T12:08:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742466_1642 (size=477) 2024-12-05T12:08:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742466_1642 (size=477) 2024-12-05T12:08:10,057 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000005/launch_container.sh] 2024-12-05T12:08:10,057 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000005/container_tokens] 2024-12-05T12:08:10,057 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000005/sysfs] 2024-12-05T12:08:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742467_1643 (size=31807) 2024-12-05T12:08:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742467_1643 (size=31807) 2024-12-05T12:08:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742467_1643 (size=31807) 2024-12-05T12:08:10,061 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000004/launch_container.sh] 2024-12-05T12:08:10,061 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000004/container_tokens] 2024-12-05T12:08:10,061 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-0_1/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000004/sysfs] 2024-12-05T12:08:10,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to 
blk_1073742468_1644 (size=349890) 2024-12-05T12:08:10,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742468_1644 (size=349890) 2024-12-05T12:08:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742468_1644 (size=349890) 2024-12-05T12:08:10,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:34808 2024-12-05T12:08:10,089 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733400137807_0011_000001 (auth:SIMPLE) from 127.0.0.1:35890 2024-12-05T12:08:11,170 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-05T12:08:11,170 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-05T12:08:11,175 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,175 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-05T12:08:11,175 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-05T12:08:11,175 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,176 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-05T12:08:11,176 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-05T12:08:11,176 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-954888707_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,176 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-05T12:08:11,176 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/export-test/export-1733400474918/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-05T12:08:11,181 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T12:08:11,183 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400491183"}]},"ts":"1733400491183"} 2024-12-05T12:08:11,184 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-05T12:08:11,184 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-05T12:08:11,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-05T12:08:11,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, UNASSIGN}] 2024-12-05T12:08:11,187 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, UNASSIGN 2024-12-05T12:08:11,187 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, UNASSIGN 2024-12-05T12:08:11,187 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=a96226560936aca69cb372bbbf3e7979, regionState=CLOSING, regionLocation=80666a11a09f,42913,1733400130845 2024-12-05T12:08:11,187 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=e14a8686422d6ba50902d3f46d1fd225, regionState=CLOSING, regionLocation=80666a11a09f,42305,1733400131012 2024-12-05T12:08:11,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, UNASSIGN because future has completed 2024-12-05T12:08:11,189 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: 
true: evictOnClose: false 2024-12-05T12:08:11,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012}] 2024-12-05T12:08:11,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, UNASSIGN because future has completed 2024-12-05T12:08:11,190 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-05T12:08:11,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845}] 2024-12-05T12:08:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T12:08:11,341 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:08:11,342 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:08:11,342 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing e14a8686422d6ba50902d3f46d1fd225, disabling compactions & flushes 2024-12-05T12:08:11,342 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:08:11,342 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:08:11,342 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. after waiting 0 ms 2024-12-05T12:08:11,342 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 
2024-12-05T12:08:11,342 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close a96226560936aca69cb372bbbf3e7979 2024-12-05T12:08:11,343 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-05T12:08:11,343 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing a96226560936aca69cb372bbbf3e7979, disabling compactions & flushes 2024-12-05T12:08:11,343 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:08:11,343 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:08:11,343 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. after waiting 0 ms 2024-12-05T12:08:11,343 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:11,350 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225. 2024-12-05T12:08:11,350 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979. 
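Aside: the DisableTableProcedure above (pid=245) and the DeleteTableProcedure that follows further down (pid=251) are the server-side halves of the test's cleanup; on the client they correspond to a plain disable-then-delete through the Admin API. A minimal sketch, with the connection setup assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; each call
          // blocks until the corresponding master procedure completes
          // (the "Checking to see if procedure is done" polling in the log).
          admin.disableTable(table);
          admin.deleteTable(table);
        }
      }
    }

Note that during the delete the HFileArchiver entries further down move the table's store files into the archive directory rather than removing them outright, which keeps them available to the exported snapshot.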
2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for e14a8686422d6ba50902d3f46d1fd225: Waiting for close lock at 1733400491342Running coprocessor pre-close hooks at 1733400491342Disabling compacts and flushes for region at 1733400491342Disabling writes for close at 1733400491342Writing region close event to WAL at 1733400491343 (+1 ms)Running coprocessor post-close hooks at 1733400491350 (+7 ms)Closed at 1733400491350 2024-12-05T12:08:11,350 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for a96226560936aca69cb372bbbf3e7979: Waiting for close lock at 1733400491343Running coprocessor pre-close hooks at 1733400491343Disabling compacts and flushes for region at 1733400491343Disabling writes for close at 1733400491343Writing region close event to WAL at 1733400491345 (+2 ms)Running coprocessor post-close hooks at 1733400491350 (+5 ms)Closed at 1733400491350 2024-12-05T12:08:11,352 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:08:11,353 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=e14a8686422d6ba50902d3f46d1fd225, regionState=CLOSED 2024-12-05T12:08:11,353 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed a96226560936aca69cb372bbbf3e7979 2024-12-05T12:08:11,354 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=a96226560936aca69cb372bbbf3e7979, regionState=CLOSED 2024-12-05T12:08:11,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012 because future has completed 2024-12-05T12:08:11,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845 because future has completed 2024-12-05T12:08:11,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=247 2024-12-05T12:08:11,356 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure e14a8686422d6ba50902d3f46d1fd225, server=80666a11a09f,42305,1733400131012 in 166 msec 2024-12-05T12:08:11,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=248 2024-12-05T12:08:11,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure a96226560936aca69cb372bbbf3e7979, server=80666a11a09f,42913,1733400130845 in 166 msec 2024-12-05T12:08:11,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e14a8686422d6ba50902d3f46d1fd225, UNASSIGN in 170 msec 2024-12-05T12:08:11,358 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=248, resume processing ppid=246 2024-12-05T12:08:11,358 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a96226560936aca69cb372bbbf3e7979, UNASSIGN in 171 msec 2024-12-05T12:08:11,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-05T12:08:11,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 174 msec 2024-12-05T12:08:11,361 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733400491360"}]},"ts":"1733400491360"} 2024-12-05T12:08:11,362 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-05T12:08:11,362 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-05T12:08:11,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 181 msec 2024-12-05T12:08:11,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-05T12:08:11,499 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T12:08:11,500 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,505 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,507 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42305 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,510 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:08:11,510 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979 2024-12-05T12:08:11,512 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/recovered.edits] 2024-12-05T12:08:11,512 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf, FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/recovered.edits] 2024-12-05T12:08:11,514 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20 2024-12-05T12:08:11,514 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/cf/309759b2cbf24b5aa5ec1d4a73736ede 2024-12-05T12:08:11,516 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225/recovered.edits/9.seqid 2024-12-05T12:08:11,516 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/recovered.edits/9.seqid to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979/recovered.edits/9.seqid 2024-12-05T12:08:11,517 DEBUG [HFileArchiver-27 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/a96226560936aca69cb372bbbf3e7979 2024-12-05T12:08:11,517 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testtb-testExportFileSystemStateWithSkipTmp/e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:08:11,517 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-05T12:08:11,517 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-05T12:08:11,518 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-05T12:08:11,520 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241205b7d0b3b084d04758a114437af1f734cf_a96226560936aca69cb372bbbf3e7979 2024-12-05T12:08:11,522 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 to hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241205f39cc0e9ff40426b87a2b20ffcead016_e14a8686422d6ba50902d3f46d1fd225 2024-12-05T12:08:11,522 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-05T12:08:11,524 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,527 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-05T12:08:11,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 
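The HFileArchiver records above show that dropping the table does not delete store files outright: each region directory under data/default/<table> (and the mob directory) is moved into a parallel archive/data/... tree under the same root directory. A minimal sketch of that path mapping, using the root dir and one file name taken from this log; the toArchivePath helper is a hypothetical illustration, not HBase's own HFileArchiver code:

```java
import org.apache.hadoop.fs.Path;

public class ArchivePathExample {
  // Map <root>/data/<ns>/<table>/<region>/<family>/<file>
  // to  <root>/archive/data/<ns>/<table>/<region>/<family>/<file>
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300");
    Path hfile = new Path(root,
        "data/default/testtb-testExportFileSystemStateWithSkipTmp/"
            + "e14a8686422d6ba50902d3f46d1fd225/cf/9964ae11cd484194847a957cc3b8ca20");
    // Prints the same archive location the HFileArchiver lines above report.
    System.out.println(toArchivePath(root, hfile));
  }
}
```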
2024-12-05T12:08:11,530 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,530 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-05T12:08:11,530 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400491530"}]},"ts":"9223372036854775807"} 2024-12-05T12:08:11,530 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733400491530"}]},"ts":"9223372036854775807"} 2024-12-05T12:08:11,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-05T12:08:11,533 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e14a8686422d6ba50902d3f46d1fd225, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733400473253.e14a8686422d6ba50902d3f46d1fd225.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a96226560936aca69cb372bbbf3e7979, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733400473253.a96226560936aca69cb372bbbf3e7979.', STARTKEY => '1', ENDKEY => ''}] 2024-12-05T12:08:11,533 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-05T12:08:11,533 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733400491533"}]},"ts":"9223372036854775807"} 2024-12-05T12:08:11,535 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-05T12:08:11,535 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 35 msec 2024-12-05T12:08:11,559 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,559 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,561 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T12:08:11,561 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T12:08:11,561 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T12:08:11,561 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,568 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:08:11,568 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:08:11,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-05T12:08:11,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-12-05T12:08:11,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with 
data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:08:11,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:08:11,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:08:11,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-05T12:08:11,570 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,571 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-05T12:08:11,576 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-05T12:08:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,578 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-05T12:08:11,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:11,600 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=828 (was 823) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:53642 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1055630025) connection to localhost/127.0.0.1:39915 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:60094 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 165272) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-524751119_1 at /127.0.0.1:59892 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-954888707_22 at /127.0.0.1:59924 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39915 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9921 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=818 (was 818), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=783 (was 862), ProcessCount=19 (was 21), AvailableMemoryMB=2109 (was 1885) - AvailableMemoryMB LEAK? - 2024-12-05T12:08:11,600 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=828 is superior to 500 2024-12-05T12:08:11,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
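The disable/delete/snapshot-removal sequence recorded above (DisableTableProcedure pid=245, DeleteTableProcedure pid=251, then the two snapshot deletions) corresponds to an ordinary client-side Admin workflow. A minimal sketch, assuming a reachable cluster configuration on the classpath; the table and snapshot names are the ones from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SkipTmpCleanupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // drives the DisableTableProcedure seen in the master log
        }
        admin.deleteTable(table);      // DeleteTableProcedure; region dirs are archived, not removed
      }
      // Matches the two "delete name: ... type: DISABLED" snapshot requests above.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```

deleteTable requires the table to be disabled first, which is why the log shows the DisableTableProcedure finishing before the DeleteTableProcedure is stored.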
2024-12-05T12:08:11,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@827304f{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T12:08:11,610 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bf118dd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:08:11,610 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:08:11,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@514d5141{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T12:08:11,610 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@67e1cde8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED} 2024-12-05T12:08:11,621 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733400137807_0011_01_000001 is : 143 2024-12-05T12:08:11,632 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000001/launch_container.sh] 2024-12-05T12:08:11,633 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000001/container_tokens] 2024-12-05T12:08:11,633 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_2/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000001/sysfs] 2024-12-05T12:08:13,814 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000003/launch_container.sh] 2024-12-05T12:08:13,814 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000003/container_tokens] 2024-12-05T12:08:13,815 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_693627247/yarn-7091037402/MiniMRCluster_693627247-localDir-nm-1_3/usercache/jenkins/appcache/application_1733400137807_0011/container_1733400137807_0011_01_000003/sysfs] 2024-12-05T12:08:14,403 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 02119c1ced80736cbd0ab02ca19ee30c, had cached 0 bytes from a total of 5791 2024-12-05T12:08:16,886 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:08:20,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-05T12:08:25,865 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:08:28,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74d24973{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-05T12:08:28,629 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31d1c277{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:08:28,629 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:08:28,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26e39968{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T12:08:28,629 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4659cb26{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED} 2024-12-05T12:08:38,983 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-05T12:08:42,355 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e2d55a422a8d6cd55d6d7cbfae79e377, had cached 0 bytes from a total of 14661 2024-12-05T12:08:42,355 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d819fc7dca4529788a4ca3a58e3fe9e5, had cached 0 bytes from a total of 5888 2024-12-05T12:08:45,644 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T12:08:45,645 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2922a925{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-05T12:08:45,645 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78b8974f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:08:45,645 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:08:45,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3b81b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T12:08:45,646 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59ddc6ea{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED} 2024-12-05T12:08:45,650 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-05T12:08:45,654 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-05T12:08:45,654 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-05T12:08:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741830_1006 (size=1172707) 2024-12-05T12:08:45,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741830_1006 (size=1172707) 2024-12-05T12:08:45,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741830_1006 (size=1172707) 2024-12-05T12:08:45,663 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T12:08:45,665 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@628fba2{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-05T12:08:45,666 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@adf2375{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-05T12:08:45,666 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-05T12:08:45,666 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4846b1a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-05T12:08:45,666 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf9b147{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED} 2024-12-05T12:08:45,667 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-05T12:08:45,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-05T12:08:45,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-05T12:08:45,667 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1831340996, stopped=false 2024-12-05T12:08:45,668 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
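The call stack above originates in the test's class-level teardown (TestExportSnapshot.tearDownAfterClass), which stops the mini MapReduce cluster and then the minicluster. A sketch of that kind of teardown, assuming the usual HBaseTestingUtil helpers and an already-initialized TEST_UTIL field; shutdownMiniCluster appears in the stack trace itself, while the shutdownMiniMapReduceCluster call is an assumption based on the "Stopping mini mapreduce cluster" message:

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class ExportSnapshotTeardownExample {
  // Assumed to have been used earlier to start the mini clusters.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniMapReduceCluster(); // "Stopping mini mapreduce cluster..." above
    TEST_UTIL.shutdownMiniCluster();          // "Shutting down minicluster", then region server stops
  }
}
```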
2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,668 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T12:08:45,668 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=80666a11a09f,34947,1733400129965 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:08:45,718 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:08:45,718 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:08:45,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:08:45,718 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T12:08:45,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:08:45,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:08:45,719 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:08:45,719 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-05T12:08:45,719 INFO [Time-limited test {}] 
client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-05T12:08:45,719 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:08:45,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,720 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '80666a11a09f,42913,1733400130845' ***** 2024-12-05T12:08:45,720 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,720 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:08:45,720 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '80666a11a09f,37629,1733400130958' ***** 2024-12-05T12:08:45,720 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,720 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:08:45,720 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '80666a11a09f,42305,1733400131012' ***** 2024-12-05T12:08:45,720 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,720 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-05T12:08:45,720 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:08:45,720 INFO [RS:0;80666a11a09f:42913 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T12:08:45,720 INFO [RS:0;80666a11a09f:42913 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-05T12:08:45,720 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:08:45,720 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(959): stopping server 80666a11a09f,37629,1733400130958 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:08:45,720 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(3091): Received CLOSE for e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:08:45,720 INFO [RS:1;80666a11a09f:37629 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;80666a11a09f:37629. 
2024-12-05T12:08:45,721 DEBUG [RS:1;80666a11a09f:37629 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:08:45,721 DEBUG [RS:1;80666a11a09f:37629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,721 INFO [RS:1;80666a11a09f:37629 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T12:08:45,721 INFO [RS:1;80666a11a09f:37629 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:08:45,721 INFO [RS:1;80666a11a09f:37629 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:08:45,721 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(959): stopping server 80666a11a09f,42913,1733400130845 2024-12-05T12:08:45,721 INFO [RS:0;80666a11a09f:42913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:08:45,721 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-05T12:08:45,721 INFO [RS:0;80666a11a09f:42913 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;80666a11a09f:42913. 
2024-12-05T12:08:45,721 DEBUG [RS:0;80666a11a09f:42913 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:08:45,721 DEBUG [RS:0;80666a11a09f:42913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-05T12:08:45,721 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T12:08:45,721 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-05T12:08:45,721 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-05T12:08:45,721 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-05T12:08:45,721 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1325): Online Regions={e2d55a422a8d6cd55d6d7cbfae79e377=testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377.} 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(3091): Received CLOSE for 02119c1ced80736cbd0ab02ca19ee30c 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(3091): Received CLOSE for d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(959): stopping server 80666a11a09f,42305,1733400131012 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e2d55a422a8d6cd55d6d7cbfae79e377, disabling compactions & flushes 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-05T12:08:45,721 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:08:45,721 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;80666a11a09f:42305. 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:08:45,721 DEBUG [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. after waiting 0 ms 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 
2024-12-05T12:08:45,721 DEBUG [RS:2;80666a11a09f:42305 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:08:45,721 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-05T12:08:45,721 DEBUG [RS:2;80666a11a09f:42305 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,721 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-05T12:08:45,722 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1325): Online Regions={02119c1ced80736cbd0ab02ca19ee30c=hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., d819fc7dca4529788a4ca3a58e3fe9e5=testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5.} 2024-12-05T12:08:45,722 DEBUG [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1351): Waiting on 02119c1ced80736cbd0ab02ca19ee30c, d819fc7dca4529788a4ca3a58e3fe9e5 2024-12-05T12:08:45,722 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=87.46 KB heapSize=138.27 KB 2024-12-05T12:08:45,722 DEBUG [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1351): Waiting on e2d55a422a8d6cd55d6d7cbfae79e377 2024-12-05T12:08:45,722 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 02119c1ced80736cbd0ab02ca19ee30c, disabling compactions & flushes 2024-12-05T12:08:45,722 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:08:45,722 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:08:45,722 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 
after waiting 0 ms 2024-12-05T12:08:45,722 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:08:45,722 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 02119c1ced80736cbd0ab02ca19ee30c 1/1 column families, dataSize=190 B heapSize=672 B 2024-12-05T12:08:45,724 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/e2d55a422a8d6cd55d6d7cbfae79e377/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:08:45,724 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,725 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 2024-12-05T12:08:45,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e2d55a422a8d6cd55d6d7cbfae79e377: Waiting for close lock at 1733400525721Running coprocessor pre-close hooks at 1733400525721Disabling compacts and flushes for region at 1733400525721Disabling writes for close at 1733400525721Writing region close event to WAL at 1733400525722 (+1 ms)Running coprocessor post-close hooks at 1733400525724 (+2 ms)Closed at 1733400525724 2024-12-05T12:08:45,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377. 
2024-12-05T12:08:45,725 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/454bc076893b4dee8418aed7fa124f44 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733400491508/DeleteFamily/seqid=0 2024-12-05T12:08:45,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742469_1645 (size=5142) 2024-12-05T12:08:45,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742469_1645 (size=5142) 2024-12-05T12:08:45,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742469_1645 (size=5142) 2024-12-05T12:08:45,729 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=34 (bloomFilter=false), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/454bc076893b4dee8418aed7fa124f44 2024-12-05T12:08:45,730 INFO [regionserver/80666a11a09f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,733 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 454bc076893b4dee8418aed7fa124f44 2024-12-05T12:08:45,734 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/.tmp/l/454bc076893b4dee8418aed7fa124f44 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/l/454bc076893b4dee8418aed7fa124f44 2024-12-05T12:08:45,739 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 454bc076893b4dee8418aed7fa124f44 2024-12-05T12:08:45,739 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/l/454bc076893b4dee8418aed7fa124f44, entries=2, sequenceid=34, filesize=5.0 K 2024-12-05T12:08:45,740 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 02119c1ced80736cbd0ab02ca19ee30c in 18ms, sequenceid=34, compaction requested=false 2024-12-05T12:08:45,743 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/acl/02119c1ced80736cbd0ab02ca19ee30c/recovered.edits/37.seqid, newMaxSeqId=37, maxSeqId=1 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,744 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 02119c1ced80736cbd0ab02ca19ee30c: Waiting for close lock at 1733400525722Running coprocessor pre-close hooks at 1733400525722Disabling compacts and flushes for region at 1733400525722Disabling writes for close at 1733400525722Obtaining lock to block concurrent updates at 1733400525722Preparing flush snapshotting stores in 02119c1ced80736cbd0ab02ca19ee30c at 1733400525722Finished memstore snapshotting hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1733400525722Flushing stores of hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. at 1733400525722Flushing 02119c1ced80736cbd0ab02ca19ee30c/l: creating writer at 1733400525723 (+1 ms)Flushing 02119c1ced80736cbd0ab02ca19ee30c/l: appending metadata at 1733400525724 (+1 ms)Flushing 02119c1ced80736cbd0ab02ca19ee30c/l: closing flushed file at 1733400525724Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@111aab11: reopening flushed file at 1733400525733 (+9 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 02119c1ced80736cbd0ab02ca19ee30c in 18ms, sequenceid=34, compaction requested=false at 1733400525740 (+7 ms)Writing region close event to WAL at 1733400525741 (+1 ms)Running coprocessor post-close hooks at 1733400525744 (+3 ms)Closed at 1733400525744 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733400133831.02119c1ced80736cbd0ab02ca19ee30c. 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/info/c181964d51a740fd9b528afb281a4aab is 173, key is testExportExpiredSnapshot,1,1733400387009.e2d55a422a8d6cd55d6d7cbfae79e377./info:regioninfo/1733400387364/Put/seqid=0 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d819fc7dca4529788a4ca3a58e3fe9e5, disabling compactions & flushes 2024-12-05T12:08:45,744 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
after waiting 0 ms 2024-12-05T12:08:45,744 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:08:45,751 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/default/testExportExpiredSnapshot/d819fc7dca4529788a4ca3a58e3fe9e5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-05T12:08:45,752 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742470_1646 (size=15646) 2024-12-05T12:08:45,752 INFO [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 2024-12-05T12:08:45,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742470_1646 (size=15646) 2024-12-05T12:08:45,752 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d819fc7dca4529788a4ca3a58e3fe9e5: Waiting for close lock at 1733400525744Running coprocessor pre-close hooks at 1733400525744Disabling compacts and flushes for region at 1733400525744Disabling writes for close at 1733400525744Writing region close event to WAL at 1733400525745 (+1 ms)Running coprocessor post-close hooks at 1733400525752 (+7 ms)Closed at 1733400525752 2024-12-05T12:08:45,752 DEBUG [RS_CLOSE_REGION-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733400387009.d819fc7dca4529788a4ca3a58e3fe9e5. 
2024-12-05T12:08:45,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742470_1646 (size=15646) 2024-12-05T12:08:45,753 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74.45 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/info/c181964d51a740fd9b528afb281a4aab 2024-12-05T12:08:45,768 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/ns/2b7c31f5f2fd4fc7b1768e9094dd754a is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149./ns:/1733400385175/DeleteFamily/seqid=0 2024-12-05T12:08:45,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742471_1647 (size=8378) 2024-12-05T12:08:45,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742471_1647 (size=8378) 2024-12-05T12:08:45,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742471_1647 (size=8378) 2024-12-05T12:08:45,773 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/ns/2b7c31f5f2fd4fc7b1768e9094dd754a 2024-12-05T12:08:45,789 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/rep_barrier/c7015ef6a26c4cbbb4b82e7f4c44054b is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149./rep_barrier:/1733400385175/DeleteFamily/seqid=0 2024-12-05T12:08:45,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742472_1648 (size=8717) 2024-12-05T12:08:45,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742472_1648 (size=8717) 2024-12-05T12:08:45,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742472_1648 (size=8717) 2024-12-05T12:08:45,793 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/rep_barrier/c7015ef6a26c4cbbb4b82e7f4c44054b 2024-12-05T12:08:45,801 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T12:08:45,801 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: 
CompactionChecker was stopped 2024-12-05T12:08:45,813 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/table/48d8722dc0d04918a6fcde65cf013a05 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733400366694.68a973778e2ec0a6c9355eecf138a149./table:/1733400385175/DeleteFamily/seqid=0 2024-12-05T12:08:45,814 INFO [regionserver/80666a11a09f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742473_1649 (size=9531) 2024-12-05T12:08:45,817 INFO [regionserver/80666a11a09f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742473_1649 (size=9531) 2024-12-05T12:08:45,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742473_1649 (size=9531) 2024-12-05T12:08:45,818 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/table/48d8722dc0d04918a6fcde65cf013a05 2024-12-05T12:08:45,822 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/info/c181964d51a740fd9b528afb281a4aab as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/info/c181964d51a740fd9b528afb281a4aab 2024-12-05T12:08:45,827 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/info/c181964d51a740fd9b528afb281a4aab, entries=84, sequenceid=240, filesize=15.3 K 2024-12-05T12:08:45,827 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/ns/2b7c31f5f2fd4fc7b1768e9094dd754a as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/ns/2b7c31f5f2fd4fc7b1768e9094dd754a 2024-12-05T12:08:45,832 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/ns/2b7c31f5f2fd4fc7b1768e9094dd754a, entries=28, sequenceid=240, filesize=8.2 K 2024-12-05T12:08:45,833 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/rep_barrier/c7015ef6a26c4cbbb4b82e7f4c44054b as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/rep_barrier/c7015ef6a26c4cbbb4b82e7f4c44054b 2024-12-05T12:08:45,837 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/rep_barrier/c7015ef6a26c4cbbb4b82e7f4c44054b, entries=26, sequenceid=240, filesize=8.5 K 2024-12-05T12:08:45,838 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/.tmp/table/48d8722dc0d04918a6fcde65cf013a05 as hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/table/48d8722dc0d04918a6fcde65cf013a05 2024-12-05T12:08:45,842 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/table/48d8722dc0d04918a6fcde65cf013a05, entries=43, sequenceid=240, filesize=9.3 K 2024-12-05T12:08:45,843 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~87.46 KB/89560, heapSize ~138.20 KB/141520, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=240, compaction requested=false 2024-12-05T12:08:45,846 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/data/hbase/meta/1588230740/recovered.edits/243.seqid, newMaxSeqId=243, maxSeqId=1 2024-12-05T12:08:45,847 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:45,847 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T12:08:45,847 INFO [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-05T12:08:45,847 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733400525721Running coprocessor pre-close hooks at 1733400525721Disabling compacts and flushes for region at 1733400525721Disabling writes for close at 1733400525721Obtaining lock to block concurrent updates at 1733400525722 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733400525722Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=89560, getHeapSize=141520, getOffHeapSize=0, getCellsCount=676 at 1733400525722Flushing stores of hbase:meta,,1.1588230740 at 1733400525723 (+1 ms)Flushing 1588230740/info: creating writer at 1733400525723Flushing 1588230740/info: appending metadata at 1733400525744 (+21 ms)Flushing 
1588230740/info: closing flushed file at 1733400525744Flushing 1588230740/ns: creating writer at 1733400525757 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733400525768 (+11 ms)Flushing 1588230740/ns: closing flushed file at 1733400525768Flushing 1588230740/rep_barrier: creating writer at 1733400525776 (+8 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733400525788 (+12 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733400525789 (+1 ms)Flushing 1588230740/table: creating writer at 1733400525797 (+8 ms)Flushing 1588230740/table: appending metadata at 1733400525813 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733400525813Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15370dea: reopening flushed file at 1733400525821 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34ade7e7: reopening flushed file at 1733400525827 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41a1cfc2: reopening flushed file at 1733400525832 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ea5b3ed: reopening flushed file at 1733400525837 (+5 ms)Finished flush of dataSize ~87.46 KB/89560, heapSize ~138.20 KB/141520, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=240, compaction requested=false at 1733400525843 (+6 ms)Writing region close event to WAL at 1733400525844 (+1 ms)Running coprocessor post-close hooks at 1733400525847 (+3 ms)Closed at 1733400525847 2024-12-05T12:08:45,847 DEBUG [RS_CLOSE_META-regionserver/80666a11a09f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-05T12:08:45,871 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T12:08:45,872 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T12:08:45,875 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-05T12:08:45,875 INFO [regionserver/80666a11a09f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-05T12:08:45,922 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(976): stopping server 80666a11a09f,37629,1733400130958; all regions closed. 2024-12-05T12:08:45,922 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(976): stopping server 80666a11a09f,42305,1733400131012; all regions closed. 2024-12-05T12:08:45,922 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(976): stopping server 80666a11a09f,42913,1733400130845; all regions closed. 
2024-12-05T12:08:45,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741834_1010 (size=14801) 2024-12-05T12:08:45,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741834_1010 (size=14801) 2024-12-05T12:08:45,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741836_1012 (size=101978) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741835_1011 (size=23473) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741836_1012 (size=101978) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741835_1011 (size=23473) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741836_1012 (size=101978) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741834_1010 (size=14801) 2024-12-05T12:08:45,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741835_1011 (size=23473) 2024-12-05T12:08:45,932 DEBUG [RS:0;80666a11a09f:42913 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs 2024-12-05T12:08:45,932 DEBUG [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs 2024-12-05T12:08:45,932 DEBUG [RS:2;80666a11a09f:42305 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs 2024-12-05T12:08:45,932 INFO [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 80666a11a09f%2C37629%2C1733400130958.meta:.meta(num 1733400133421) 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 80666a11a09f%2C42913%2C1733400130845:(num 1733400132988) 2024-12-05T12:08:45,932 DEBUG [RS:0;80666a11a09f:42913 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 80666a11a09f%2C42305%2C1733400131012:(num 1733400132985) 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,932 DEBUG [RS:2;80666a11a09f:42305 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] hbase.ChoreService(370): Chore service for: regionserver/80666a11a09f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, 
period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] hbase.ChoreService(370): Chore service for: regionserver/80666a11a09f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-05T12:08:45,932 INFO [regionserver/80666a11a09f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-05T12:08:45,932 INFO [RS:0;80666a11a09f:42913 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:08:45,932 INFO [RS:2;80666a11a09f:42305 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-05T12:08:45,933 INFO [RS:0;80666a11a09f:42913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:08:45,933 INFO [RS:2;80666a11a09f:42305 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:08:45,933 INFO [regionserver/80666a11a09f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T12:08:45,933 INFO [RS:2;80666a11a09f:42305 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42305 2024-12-05T12:08:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073741833_1009 (size=5996) 2024-12-05T12:08:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073741833_1009 (size=5996) 2024-12-05T12:08:45,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073741833_1009 (size=5996) 2024-12-05T12:08:45,935 INFO [RS:0;80666a11a09f:42913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42913 2024-12-05T12:08:45,938 DEBUG [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/oldWALs 2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 80666a11a09f%2C37629%2C1733400130958:(num 1733400132981) 2024-12-05T12:08:45,938 DEBUG [RS:1;80666a11a09f:37629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] regionserver.LeaseManager(133): Closed leases 2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] hbase.ChoreService(370): Chore service for: regionserver/80666a11a09f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:08:45,938 INFO [regionserver/80666a11a09f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-05T12:08:45,938 INFO [RS:1;80666a11a09f:37629 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37629 2024-12-05T12:08:45,942 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/80666a11a09f,42913,1733400130845 2024-12-05T12:08:45,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-05T12:08:45,942 INFO [RS:0;80666a11a09f:42913 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:08:45,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/80666a11a09f,42305,1733400131012 2024-12-05T12:08:45,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/80666a11a09f,37629,1733400130958 2024-12-05T12:08:45,951 INFO [RS:1;80666a11a09f:37629 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:08:45,951 INFO [RS:2;80666a11a09f:42305 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-05T12:08:45,959 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [80666a11a09f,42305,1733400131012] 2024-12-05T12:08:45,976 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/80666a11a09f,42305,1733400131012 already deleted, retry=false 2024-12-05T12:08:45,976 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 80666a11a09f,42305,1733400131012 expired; onlineServers=2 2024-12-05T12:08:45,976 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [80666a11a09f,37629,1733400130958] 2024-12-05T12:08:45,984 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/80666a11a09f,37629,1733400130958 already deleted, retry=false 2024-12-05T12:08:45,984 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 80666a11a09f,37629,1733400130958 expired; onlineServers=1 2024-12-05T12:08:45,984 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [80666a11a09f,42913,1733400130845] 2024-12-05T12:08:45,992 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/80666a11a09f,42913,1733400130845 already deleted, retry=false 2024-12-05T12:08:45,993 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 80666a11a09f,42913,1733400130845 expired; onlineServers=0 2024-12-05T12:08:45,993 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '80666a11a09f,34947,1733400129965' ***** 2024-12-05T12:08:45,993 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-05T12:08:45,993 INFO [M:0;80666a11a09f:34947 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-05T12:08:45,993 INFO [M:0;80666a11a09f:34947 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-05T12:08:45,993 DEBUG [M:0;80666a11a09f:34947 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-05T12:08:45,993 DEBUG [M:0;80666a11a09f:34947 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-05T12:08:45,994 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-05T12:08:45,994 DEBUG [master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.large.0-1733400132468 {}] cleaner.HFileCleaner(306): Exit Thread[master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.large.0-1733400132468,5,FailOnTimeoutGroup] 2024-12-05T12:08:45,994 DEBUG [master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.small.0-1733400132483 {}] cleaner.HFileCleaner(306): Exit Thread[master/80666a11a09f:0:becomeActiveMaster-HFileCleaner.small.0-1733400132483,5,FailOnTimeoutGroup] 2024-12-05T12:08:45,994 INFO [M:0;80666a11a09f:34947 {}] hbase.ChoreService(370): Chore service for: master/80666a11a09f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-05T12:08:45,994 INFO [M:0;80666a11a09f:34947 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-05T12:08:45,994 DEBUG [M:0;80666a11a09f:34947 {}] master.HMaster(1795): Stopping service threads 2024-12-05T12:08:45,994 INFO [M:0;80666a11a09f:34947 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-05T12:08:45,994 INFO [M:0;80666a11a09f:34947 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-05T12:08:45,995 INFO [M:0;80666a11a09f:34947 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-05T12:08:45,995 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-05T12:08:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-05T12:08:46,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-05T12:08:46,001 DEBUG [M:0;80666a11a09f:34947 {}] zookeeper.ZKUtil(347): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-05T12:08:46,002 WARN [M:0;80666a11a09f:34947 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-05T12:08:46,003 INFO [M:0;80666a11a09f:34947 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/.lastflushedseqids 2024-12-05T12:08:46,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35565 is added to blk_1073742474_1650 (size=320) 2024-12-05T12:08:46,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39603 is added to blk_1073742474_1650 (size=320) 2024-12-05T12:08:46,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37713 is added to blk_1073742474_1650 (size=320) 2024-12-05T12:08:46,019 INFO [M:0;80666a11a09f:34947 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-05T12:08:46,019 INFO [M:0;80666a11a09f:34947 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-05T12:08:46,019 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-05T12:08:46,032 INFO [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:08:46,032 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-05T12:08:46,032 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-05T12:08:46,032 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-05T12:08:46,032 INFO [M:0;80666a11a09f:34947 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=998.05 KB heapSize=1.17 MB 2024-12-05T12:08:46,033 ERROR [AsyncFSWAL-0-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:08:46,060 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,060 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42913-0x101a6a890970001, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,060 INFO [RS:0;80666a11a09f:42913 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:08:46,061 INFO [RS:0;80666a11a09f:42913 {}] regionserver.HRegionServer(1031): Exiting; stopping=80666a11a09f,42913,1733400130845; zookeeper connection closed. 
2024-12-05T12:08:46,061 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@72a84e40 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@72a84e40 2024-12-05T12:08:46,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,068 INFO [RS:1;80666a11a09f:37629 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:08:46,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37629-0x101a6a890970002, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,068 INFO [RS:1;80666a11a09f:37629 {}] regionserver.HRegionServer(1031): Exiting; stopping=80666a11a09f,37629,1733400130958; zookeeper connection closed. 2024-12-05T12:08:46,068 INFO [RS:2;80666a11a09f:42305 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-05T12:08:46,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42305-0x101a6a890970003, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-05T12:08:46,068 INFO [RS:2;80666a11a09f:42305 {}] regionserver.HRegionServer(1031): Exiting; stopping=80666a11a09f,42305,1733400131012; zookeeper connection closed. 2024-12-05T12:08:46,068 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@31080dea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@31080dea 2024-12-05T12:08:46,069 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2af9293b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2af9293b 2024-12-05T12:08:46,069 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-05T12:08:50,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:50,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-05T12:08:50,361 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-05T12:08:50,362 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-05T12:08:50,363 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-05T12:08:50,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:50,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-05T12:08:50,364 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-05T12:08:51,243 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:09:08,984 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:09:38,984 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;80666a11a09f:34947 234 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5d1ec60f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 2 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@464c70eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@1e587b64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12586 Waited count: 13386 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@73bb6236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@56559f82 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 920 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@64a549f8-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 3500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@463b12d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37541): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45284 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1564 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@554421ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37541): State: TIMED_WAITING Blocked count: 125 Waited count: 2689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37541): State: TIMED_WAITING Blocked count: 112 Waited count: 2680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37541): State: TIMED_WAITING Blocked count: 104 Waited count: 2692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37541): State: TIMED_WAITING Blocked count: 113 Waited count: 2699 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37541): State: TIMED_WAITING Blocked count: 116 Waited count: 2682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1339209683)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp342995557-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp342995557-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp342995557-89-acceptor-0@49a135d1-ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp342995557-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-bdabc30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@753c951a): State: TIMED_WAITING Blocked count: 0 Waited count: 917 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37503): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 321 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1540 Waited count: 1624 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7de90c1a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 484 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp764570459-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp764570459-120-acceptor-0@11e28768-ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp764570459-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp764570459-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-5b381830-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1055630025) connection to localhost/127.0.0.1:37541 from jenkins): State: TIMED_WAITING Blocked count: 1506 Waited count: 1506 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 0 Waited count: 2307 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@649bbc64): State: TIMED_WAITING Blocked count: 0 Waited count: 917 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 41019): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1538 Waited count: 1620 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@29d9e750): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp153931968-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp153931968-154-acceptor-0@22334216-ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp153931968-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp153931968-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-eb557c8-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20315e3d): State: TIMED_WAITING Blocked count: 0 Waited count: 916 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 44125): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 260 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1460 Waited count: 1622 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@65a8f63c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@250cff32[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 
(java.util.concurrent.ThreadPoolExecutor$Worker@46124118[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@10dc8883[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49240): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:49240):): State: WAITING Blocked count: 2 Waited count: 434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76765824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 474 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 249 (LeaseRenewer:jenkins@localhost:37541): State: TIMED_WAITING Blocked count: 13 Waited count: 477 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@1282aabb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:49240)): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5369e84d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 8 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3eff1433 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 94 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 8 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 7 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39afb625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 184 Waited count: 707 Waiting on java.util.concurrent.Semaphore$NonfairSync@643ea268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 61 Waited count: 318 Waiting on java.util.concurrent.Semaphore$NonfairSync@78091522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947): State: WAITING Blocked count: 45 Waited count: 9858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e91d89c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b778f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b67377f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b8251b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b960fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ccf37be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 136 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;80666a11a09f:34947): State: TIMED_WAITING Blocked count: 12 Waited count: 3959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1110/0x00007f1fb0f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@23371030): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4541 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 128 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45341 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 44 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 455 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@577766f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bb7c9b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 25 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f6f5f61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 
(regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2aa5eb00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 508 (LeaseRenewer:jenkins.hfs.0@localhost:37541): State: TIMED_WAITING Blocked count: 15 Waited count: 475 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (LeaseRenewer:jenkins.hfs.2@localhost:37541): State: TIMED_WAITING Blocked count: 14 Waited count: 474 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 509 (LeaseRenewer:jenkins.hfs.1@localhost:37541): State: TIMED_WAITING Blocked count: 14 Waited count: 478 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 
Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45202 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 501 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 582 (region-location-1): State: WAITING Blocked count: 4 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked 
count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3df3819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1430 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 942 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1650 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@f18bb68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2142 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2333 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2854 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 569 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11442 (AsyncFSWAL-1-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b015450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11445 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-05T12:10:08,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:10:38,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;80666a11a09f:34947 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: 
WAITING Blocked count: 34 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@5d1ec60f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 2 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@464c70eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@e0df46c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12586 Waited count: 13387 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@73bb6236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@56559f82 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1040 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@64a549f8-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 3500 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@463b12d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37541): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51210 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1564 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@554421ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37541): State: TIMED_WAITING Blocked count: 125 Waited count: 2749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37541): State: TIMED_WAITING Blocked count: 112 Waited count: 2741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37541): State: TIMED_WAITING Blocked count: 104 Waited count: 2752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37541): State: TIMED_WAITING Blocked count: 117 Waited count: 2759 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37541): State: TIMED_WAITING Blocked count: 116 Waited count: 2742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1339209683)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp342995557-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp342995557-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp342995557-89-acceptor-0@49a135d1-ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 
(qtp342995557-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-bdabc30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@753c951a): State: TIMED_WAITING Blocked count: 0 Waited count: 1037 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for 
port 37503): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1561 Waited count: 1667 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7de90c1a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 543 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp764570459-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp764570459-120-acceptor-0@11e28768-ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp764570459-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp764570459-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-5b381830-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1055630025) connection to localhost/127.0.0.1:37541 from jenkins): State: TIMED_WAITING Blocked count: 1561 Waited count: 1561 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 0 Waited count: 2363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@649bbc64): State: TIMED_WAITING Blocked count: 0 Waited count: 1037 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 41019): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1558 Waited count: 1661 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@29d9e750): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp153931968-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp153931968-154-acceptor-0@22334216-ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp153931968-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp153931968-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-eb557c8-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20315e3d): State: TIMED_WAITING Blocked count: 0 Waited count: 1036 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 44125): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 280 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1480 Waited count: 1662 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@65a8f63c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 545 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 546 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 531 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@250cff32[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@46124118[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@10dc8883[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49240): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:49240):): State: WAITING Blocked count: 2 Waited count: 439 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76765824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 479 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@1282aabb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:49240)): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5369e84d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 8 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3eff1433 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 8 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 7 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39afb625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 184 Waited count: 707 Waiting on java.util.concurrent.Semaphore$NonfairSync@643ea268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 61 Waited count: 318 Waiting on java.util.concurrent.Semaphore$NonfairSync@78091522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947): State: WAITING Blocked count: 45 Waited count: 9858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e91d89c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b778f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b67377f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b8251b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b960fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ccf37be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 136 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;80666a11a09f:34947): State: TIMED_WAITING Blocked count: 12 Waited count: 3959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1110/0x00007f1fb0f966b8.run(Unknown Source) 
app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@23371030): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5140 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 128 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1983abfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51344 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 44 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 455 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@577766f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bb7c9b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 25 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f6f5f61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2aa5eb00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51205 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 582 (region-location-1): State: WAITING Blocked count: 4 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3df3819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1430 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 942 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1650 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@f18bb68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2142 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2854 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 569 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11442 (AsyncFSWAL-1-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b015450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11445 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-05T12:11:08,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:11:38,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;80666a11a09f:34947 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@5d1ec60f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 2 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@464c70eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5828 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@4404cd3e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12586 Waited count: 13388 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@73bb6236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@56559f82 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1160 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@64a549f8-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 3500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@463b12d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37541): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57137 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1564 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@554421ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37541): State: TIMED_WAITING Blocked count: 125 Waited count: 2809 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37541): State: TIMED_WAITING Blocked count: 112 Waited count: 2801 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37541): State: TIMED_WAITING Blocked count: 104 Waited count: 2812 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37541): State: TIMED_WAITING Blocked count: 117 Waited count: 2819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37541): State: TIMED_WAITING Blocked count: 116 Waited count: 2802 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1339209683)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp342995557-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp342995557-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp342995557-89-acceptor-0@49a135d1-ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp342995557-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-bdabc30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@753c951a): State: TIMED_WAITING Blocked count: 0 Waited count: 1157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37503): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1581 Waited count: 1707 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7de90c1a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp764570459-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp764570459-120-acceptor-0@11e28768-ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp764570459-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp764570459-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-5b381830-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1055630025) connection to localhost/127.0.0.1:37541 from jenkins): State: TIMED_WAITING Blocked count: 1621 Waited count: 1621 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 0 Waited count: 2423 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@649bbc64): State: TIMED_WAITING Blocked count: 0 Waited count: 1157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 41019): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1578 Waited count: 1701 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@29d9e750): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 615 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp153931968-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp153931968-154-acceptor-0@22334216-ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp153931968-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp153931968-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-eb557c8-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20315e3d): State: TIMED_WAITING Blocked count: 0 Waited count: 1156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 44125): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 300 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1500 Waited count: 1702 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@65a8f63c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 606 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 652 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@250cff32[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@46124118[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@10dc8883[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49240): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 
cport:49240):): State: WAITING Blocked count: 2 Waited count: 443 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76765824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@1282aabb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 698 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:49240)): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5369e84d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 8 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3eff1433 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 8 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39afb625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 184 Waited count: 707 Waiting on java.util.concurrent.Semaphore$NonfairSync@643ea268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 61 Waited count: 318 Waiting on java.util.concurrent.Semaphore$NonfairSync@78091522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947): State: WAITING Blocked count: 45 Waited count: 9858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e91d89c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b778f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b67377f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b8251b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b960fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ccf37be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 136 
Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;80666a11a09f:34947): State: TIMED_WAITING Blocked count: 12 Waited count: 3959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1110/0x00007f1fb0f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@23371030): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5740 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 128 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1983abfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 44 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 455 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@577766f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bb7c9b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 25 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f6f5f61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2aa5eb00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 4 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 960 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3df3819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1430 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 942 Waiting on java.util.concurrent.ForkJoinPool@73c6f368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1650 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@f18bb68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2142 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2854 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11442 (AsyncFSWAL-1-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b015450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11445 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11446 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-05T12:12:08,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:12:11,153 DEBUG [master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=25, reuseRatio=71.43% 2024-12-05T12:12:11,153 DEBUG [master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-05T12:12:18,973 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-05T12:12:38,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;80666a11a09f:34947 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@5d1ec60f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 2 Waited count: 32 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@464c70eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.CountDownLatch$Sync@3192b75b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12586 Waited count: 13389 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@73bb6236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@56559f82 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@64a549f8-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 3500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@463b12d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37541): State: TIMED_WAITING Blocked count: 1 Waited 
count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 215 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 63072 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1564 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@554421ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37541): State: TIMED_WAITING Blocked count: 125 Waited count: 2869 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37541): State: TIMED_WAITING Blocked count: 112 Waited count: 2861 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37541): State: TIMED_WAITING Blocked count: 104 Waited count: 2873 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37541): State: TIMED_WAITING Blocked count: 118 Waited count: 2880 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37541): State: TIMED_WAITING Blocked count: 116 Waited count: 2863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 320 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1339209683)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp342995557-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp342995557-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp342995557-89-acceptor-0@49a135d1-ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp342995557-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-bdabc30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@753c951a): State: TIMED_WAITING Blocked count: 0 Waited count: 1277 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37503): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1601 Waited count: 1747 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7de90c1a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 723 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp764570459-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp764570459-120-acceptor-0@11e28768-ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp764570459-121): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp764570459-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-5b381830-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1055630025) connection to localhost/127.0.0.1:37541 from jenkins): State: TIMED_WAITING Blocked count: 1677 Waited count: 1677 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 0 Waited count: 2482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@649bbc64): State: TIMED_WAITING Blocked count: 0 Waited count: 1277 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 41019): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 421 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1598 Waited count: 1742 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@29d9e750): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 675 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp153931968-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp153931968-154-acceptor-0@22334216-ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp153931968-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp153931968-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-eb557c8-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20315e3d): State: TIMED_WAITING Blocked count: 0 Waited count: 1276 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 44125): State: TIMED_WAITING Blocked count: 1 Waited count: 65 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 320 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1520 Waited count: 1742 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@65a8f63c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 665 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 666 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 712 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32719d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@250cff32[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@46124118[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54189442 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@10dc8883[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49240): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 319 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:49240):): State: WAITING Blocked count: 2 Waited count: 447 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76765824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 487 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@1282aabb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 761 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:49240)): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5369e84d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 8 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3eff1433 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 8 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@39afb625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 184 Waited count: 707 Waiting on java.util.concurrent.Semaphore$NonfairSync@643ea268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 61 Waited count: 318 Waiting on java.util.concurrent.Semaphore$NonfairSync@78091522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947): State: WAITING Blocked count: 45 Waited count: 9858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e91d89c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b778f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b67377f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34947): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b8251b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b960fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ccf37be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 136 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;80666a11a09f:34947): State: TIMED_WAITING Blocked count: 12 Waited count: 3959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1110/0x00007f1fb0f966b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@23371030): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6339 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 128 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1983abfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63348 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 44 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 455 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@577766f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bb7c9b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 25 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f6f5f61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2aa5eb00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63209 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 4 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 966 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3df3819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE 
Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1293 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1294 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1430 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1650 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@f18bb68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2134 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2142 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11442 (AsyncFSWAL-1-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b015450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11446 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11451 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-05T12:13:08,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:13:38,987 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-05T12:13:46,034 DEBUG [M:0;80666a11a09f:34947 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733400526019Disabling compacts and flushes for region at 1733400526019Disabling writes for close at 1733400526032 (+13 ms)Obtaining lock to block concurrent updates at 1733400526032Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733400526032Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1022003, getHeapSize=1224512, getOffHeapSize=0, getCellsCount=2672 at 1733400526032Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733400826033 (+300001 ms) 2024-12-05T12:13:46,034 WARN [M:0;80666a11a09f:34947 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4594, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-12-05T12:13:46,036 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:13:46,038 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-05T12:13:46,038 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-05T12:13:46,038 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 2024-12-05T12:13:46,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:13:46,042 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-05T12:13:46,042 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 2024-12-05T12:13:46,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;80666a11a09f:34947 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 34 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@5d1ec60f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 2 Waited count: 35 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@464c70eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7027 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@32d2d199 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12586 Waited count: 13390 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@73bb6236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@56559f82 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@210db1f9): State: TIMED_WAITING Blocked count: 0 Waited count: 1400 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1788055457-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1788055457-38): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1788055457-39): State: RUNNABLE Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1788055457-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1788055457-41-acceptor-0@64a549f8-ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:44819}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1788055457-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1788055457-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1788055457-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-48adee89-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 3500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@463b12d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37541): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@37f20e91): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 234 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@308b96ef): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 235 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 69024 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1564 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@554421ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37541): State: TIMED_WAITING Blocked count: 125 Waited count: 2930 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37541): State: TIMED_WAITING Blocked count: 114 Waited count: 2923 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37541): State: TIMED_WAITING Blocked count: 104 Waited count: 2934 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37541): State: TIMED_WAITING Blocked count: 118 Waited count: 2940 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37541): State: TIMED_WAITING Blocked count: 120 Waited count: 2924 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2f8bc2): State: TIMED_WAITING Blocked count: 0 Waited count: 350 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@641be552): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@54c54640): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@401c0ac3): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1339209683)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp342995557-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp342995557-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp342995557-89-acceptor-0@49a135d1-ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:44277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp342995557-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-bdabc30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@753c951a): State: TIMED_WAITING Blocked count: 0 Waited count: 1397 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 37503): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bad99a9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1621 Waited count: 1788 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7de90c1a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 733 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 37503): State: TIMED_WAITING Blocked count: 0 Waited count: 788 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp764570459-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp764570459-120-acceptor-0@11e28768-ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:36661}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp764570459-121): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp764570459-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-5b381830-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1055630025) connection to localhost/127.0.0.1:37541 from jenkins): State: TIMED_WAITING Blocked count: 1733 Waited count: 1733 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 0 Waited count: 2539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@649bbc64): State: TIMED_WAITING Blocked count: 0 Waited count: 1397 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 41019): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 441 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@592a8c78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1618 Waited count: 1784 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@29d9e750): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 742 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 725 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 41019): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp153931968-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f1fb042ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp153931968-154-acceptor-0@22334216-ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:45127}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp153931968-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp153931968-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-eb557c8-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20315e3d): State: TIMED_WAITING Blocked count: 0 Waited count: 1396 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 44125): State: TIMED_WAITING Blocked count: 1 Waited count: 71 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@523a2d3d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541): State: TIMED_WAITING Blocked count: 1540 Waited count: 1782 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@65a8f63c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 725 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC 
Server handler 1 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 764 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 44125): State: TIMED_WAITING Blocked count: 0 Waited count: 711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1)): State: TIMED_WAITING Blocked count: 31 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3)): State: TIMED_WAITING Blocked count: 30 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4)): State: TIMED_WAITING Blocked count: 21 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2)): State: TIMED_WAITING Blocked count: 25 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32719d6b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b072d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@250cff32[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@46124118[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54189442 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@10dc8883[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49240): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7af3405f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:49240):): State: WAITING Blocked count: 2 Waited count: 452 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76765824 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 492 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@665c782c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@1282aabb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:49240)): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5369e84d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 8 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 71 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3eff1433 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 8 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7213fd1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@39afb625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 184 Waited count: 707 Waiting on java.util.concurrent.Semaphore$NonfairSync@643ea268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 61 Waited count: 318 Waiting on java.util.concurrent.Semaphore$NonfairSync@78091522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34947): State: WAITING Blocked count: 45 Waited count: 9858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3e91d89c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a4b5ff5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b778f36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b67377f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6b8251b7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34947): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@4b960fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ccf37be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 313 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 136 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;80666a11a09f:34947): State: TIMED_WAITING Blocked count: 12 Waited count: 3960 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1435/0x00007f1fb12327c8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) 
app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/80666a11a09f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (org.apache.hadoop.hdfs.PeerCache@23371030): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6939 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 395 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 62 Waited count: 2 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 396 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 128 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1983abfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69349 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 44 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 455 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@577766f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 470 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@bb7c9b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 25 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f6f5f61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 479 (regionserver/80666a11a09f:0.procedureResultReporter): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2aa5eb00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 514 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69209 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 539 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 582 (region-location-1): State: WAITING Blocked count: 4 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 583 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 
972 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 116 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3df3819 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1236 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1237 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1239 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1289 (RPCClient-NioEventLoopGroup-6-9):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1290 (RPCClient-NioEventLoopGroup-6-10):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1291 (RPCClient-NioEventLoopGroup-6-11):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1293 (RPCClient-NioEventLoopGroup-6-12):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1294 (RPCClient-NioEventLoopGroup-6-13):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1650 (Container metrics unregistration):
  State: WAITING
  Blocked count: 11
  Waited count: 70
  Waiting on java.util.TaskQueue@f18bb68
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 2011 (region-location-3):
  State: WAITING
  Blocked count: 3
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2012 (region-location-4):
  State: WAITING
  Blocked count: 3
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46095842
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2134 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2142 (RPCClient-NioEventLoopGroup-6-14):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11442 (AsyncFSWAL-1-hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData-prefix:80666a11a09f,34947,1733400129965):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b015450
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11446 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11451 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 11454 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11455 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1421/0x00007f1fb122ad98.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-05T12:13:50,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-05T12:13:51,036 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-05T12:13:51,036 INFO [M:0;80666a11a09f:34947 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-05T12:13:51,036 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-05T12:13:51,037 INFO [M:0;80666a11a09f:34947 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34947
2024-12-05T12:13:51,037 INFO [M:0;80666a11a09f:34947 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-05T12:13:51,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37541/user/jenkins/test-data/bb963e1b-29e4-bb26-70b2-6783b3090300/MasterData/WALs/80666a11a09f,34947,1733400129965/80666a11a09f%2C34947%2C1733400129965.1733400131616
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-05T12:13:51,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:13:51,186 INFO [M:0;80666a11a09f:34947 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-05T12:13:51,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34947-0x101a6a890970000, quorum=127.0.0.1:49240, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-05T12:13:51,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1865ebea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:13:51,228 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fadfd2c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:13:51,228 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:13:51,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49217e61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:13:51,228 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33104ee0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED}
2024-12-05T12:13:51,230 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:13:51,231 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:13:51,231 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1735945732-172.17.0.2-1733400125338 (Datanode Uuid bda4ce52-ded1-49f3-a81c-39967248aef2) service to localhost/127.0.0.1:37541
2024-12-05T12:13:51,231 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:13:51,232 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data5/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,233 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data6/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,233 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:13:51,242 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70601096{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:13:51,243 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68f62052{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:13:51,243 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:13:51,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8c924b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:13:51,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@428a9356{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED}
2024-12-05T12:13:51,248 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:13:51,248 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:13:51,248 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:13:51,248 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1735945732-172.17.0.2-1733400125338 (Datanode Uuid 7003692e-dcbf-496a-a497-c6b12c3641e7) service to localhost/127.0.0.1:37541
2024-12-05T12:13:51,250 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:13:51,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data3/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data4/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22e5dde{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-05T12:13:51,260 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3cbfa094{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:13:51,260 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:13:51,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b6c5d7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:13:51,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8da11ec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED}
2024-12-05T12:13:51,264 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-05T12:13:51,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-05T12:13:51,264 WARN [BP-1735945732-172.17.0.2-1733400125338 heartbeating to localhost/127.0.0.1:37541 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1735945732-172.17.0.2-1733400125338 (Datanode Uuid 1f0bfc6e-34af-4444-98e7-cd02286dedfa) service to localhost/127.0.0.1:37541
2024-12-05T12:13:51,264 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-05T12:13:51,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-05T12:13:51,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data1/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/cluster_25076363-b5ee-b0f5-182c-ca76f44a18a7/data/data2/current/BP-1735945732-172.17.0.2-1733400125338 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-05T12:13:51,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@150ffd7b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-05T12:13:51,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@322d032a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-05T12:13:51,282 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-05T12:13:51,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15a5d53b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-05T12:13:51,287 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72770802{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/b25dcafd-a00c-c924-93f8-4d057d2ca201/hadoop.log.dir/,STOPPED}
2024-12-05T12:13:51,301 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-05T12:13:51,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down